├── .gitignore ├── src ├── main │ ├── resources │ │ └── libcephfs.jar │ └── java │ │ └── org │ │ └── apache │ │ └── hadoop │ │ └── fs │ │ └── ceph │ │ ├── CephFs.java │ │ ├── CephConfigKeys.java │ │ ├── CephFsProto.java │ │ ├── CephOutputStream.java │ │ ├── CephInputStream.java │ │ ├── CephTalker.java │ │ └── CephFileSystem.java └── test │ ├── resources │ ├── hadoop-common-3.0.0-SNAPSHOT.jar │ └── hadoop-common-3.0.0-SNAPSHOT-tests.jar │ └── java │ └── org │ └── apache │ └── hadoop │ └── fs │ ├── test │ ├── connector │ │ ├── HcfsTestConnectorInterface.java │ │ ├── HcfsTestConnector.java │ │ └── HcfsTestConnectorFactory.java │ └── unit │ │ ├── HcfsUmaskTest.java │ │ └── HcfsFileSystemTest.java │ └── contract │ └── cephfs │ ├── TestLocalFSContractSeek.java │ ├── TestLocalFSContractConcat.java │ ├── TestLocalFSContractOpen.java │ ├── TestLocalFSContractRename.java │ ├── TestLocalFSContractAppend.java │ ├── TestLocalFSContractDelete.java │ ├── TestLocalFSContractCreate.java │ ├── TestLocalFSContractMkdir.java │ ├── TestLocalFSContractRootDirectory.java │ ├── TestLocalFSContractLoaded.java │ └── CephFSContract.java ├── resources └── vagrant │ ├── ceph-repo-install.sh │ ├── dev-setup.sh │ ├── ceph-install.sh │ ├── ceph-fs-create.sh │ └── Vagrantfile ├── README.md ├── conf ├── core-site.xml ├── localfs.xml └── hadoop-env.sh ├── pom.xml └── LICENSE /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | -------------------------------------------------------------------------------- /src/main/resources/libcephfs.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ceph/cephfs-hadoop/master/src/main/resources/libcephfs.jar -------------------------------------------------------------------------------- /src/test/resources/hadoop-common-3.0.0-SNAPSHOT.jar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ceph/cephfs-hadoop/master/src/test/resources/hadoop-common-3.0.0-SNAPSHOT.jar -------------------------------------------------------------------------------- /src/test/resources/hadoop-common-3.0.0-SNAPSHOT-tests.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ceph/cephfs-hadoop/master/src/test/resources/hadoop-common-3.0.0-SNAPSHOT-tests.jar -------------------------------------------------------------------------------- /resources/vagrant/ceph-repo-install.sh: -------------------------------------------------------------------------------- 1 | cat >> /etc/yum.repos.d/ceph.repo << EOF 2 | [ceph-noarch] 3 | name=Ceph noarch packages 4 | baseurl=http://ceph.com/rpm-firefly/rhel6/noarch 5 | enabled=1 6 | gpgcheck=1 7 | type=rpm-md 8 | gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc 9 | EOF 10 | -------------------------------------------------------------------------------- /resources/vagrant/dev-setup.sh: -------------------------------------------------------------------------------- 1 | wget http://mirrors.gigenet.com/apache/maven/maven-3/3.0.5/binaries/apache-maven-3.0.5-bin.tar.gz 2 | tar -zxvf apache-maven-3.0.5-bin.tar.gz -C /opt/ 3 | ln -s /opt/apache-maven-3.0.5/bin/mvn /usr/bin/mvn 4 | 5 | echo "make sure to export JAVA_HOME=/usr/lib/jvm/java-1.6.0-openjdk-1.6.0.0.x86_64/" 6 | 7 | echo "Now building ceph-hadoop !!!!!!!!!!!!!" 
8 | cd /ceph-hadoop/ 9 | 10 | mvn clean package 11 | -------------------------------------------------------------------------------- /resources/vagrant/ceph-install.sh: -------------------------------------------------------------------------------- 1 | sudo yum update && sudo yum install ceph-deploy 2 | yum install -y java-1.6.0-openjdk-devel.x86_64 3 | yum install -y ceph 4 | yum install -y ceph-release 5 | yum install -y ceph-deploy 6 | yum install -y ceph-common 7 | yum install -y ceph-devel 8 | yum install -y ceph-fuse 9 | yum install -y ceph-fuse libcephfs1.x86_64 rbd-fuse 10 | 11 | echo "Now installing the java bindings" 12 | 13 | yum install -y cephfs-java.x86_64 14 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/test/connector/HcfsTestConnectorInterface.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.test.connector; 2 | 3 | import java.io.IOException; 4 | 5 | import org.apache.hadoop.conf.Configuration; 6 | import org.apache.hadoop.fs.FileSystem; 7 | 8 | 9 | /* generic interface for creating HCFS file sytems for testing purposes */ 10 | 11 | public interface HcfsTestConnectorInterface { 12 | 13 | /* return a fully configured instantiated file system for testing */ 14 | public FileSystem create() throws IOException; 15 | 16 | /* returns a configuration file with properties for a given FS */ 17 | public Configuration createConfiguration(); 18 | 19 | } 20 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/test/connector/HcfsTestConnector.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.test.connector; 2 | 3 | import java.io.IOException; 4 | import java.io.InputStream; 5 | 6 | import org.apache.hadoop.conf.Configuration; 7 | import org.apache.hadoop.fs.FileSystem; 8 | 9 | 10 | /* 11 
| * Generic HCFS file system test connector. 12 | * This test connector takes a fully qualified o.a.h.f.FileSystem implementor class 13 | * as an environment variable. 14 | * 15 | */ 16 | public class HcfsTestConnector implements HcfsTestConnectorInterface { 17 | 18 | public Configuration createConfiguration(){ 19 | Configuration c = new Configuration(); 20 | InputStream config = HcfsTestConnector.class.getClassLoader().getResourceAsStream("core-site.xml"); 21 | c.addResource(config); 22 | 23 | return c; 24 | } 25 | 26 | public FileSystem create() throws IOException{ 27 | return FileSystem.get(createConfiguration()); 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/ceph/CephFs.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.ceph; 2 | 3 | import org.apache.hadoop.conf.Configuration; 4 | import org.apache.hadoop.fs.DelegateToFileSystem; 5 | import org.apache.hadoop.fs.AbstractFileSystem; 6 | 7 | import java.io.IOException; 8 | import java.net.URI; 9 | import java.net.URISyntaxException; 10 | 11 | /** 12 | * The CephFs implementation of AbstractFileSystem. 13 | * This impl delegates to the old FileSystem 14 | */ 15 | public class CephFs extends DelegateToFileSystem { 16 | /** 17 | * This constructor has the signature needed by 18 | * {@link AbstractFileSystem#createFileSystem(URI, Configuration)}. 
19 | * 20 | * @param theUri which must be that of localFs 21 | * @param conf 22 | * @throws IOException 23 | * @throws URISyntaxException 24 | */ 25 | CephFs(final URI theUri, final Configuration conf) throws IOException, 26 | URISyntaxException { 27 | super(theUri, new CephFileSystem(conf), conf, "ceph", true); 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/test/connector/HcfsTestConnectorFactory.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.test.connector; 2 | 3 | 4 | public class HcfsTestConnectorFactory { 5 | 6 | /* Loads an HCFS file system adapter via environment variable */ 7 | public static HcfsTestConnectorInterface getHcfsTestConnector() throws RuntimeException{ 8 | String testConnector = System.getProperty("HCFS_FILE_SYSTEM_CONNECTOR"); 9 | if(testConnector==null || "".equals(testConnector)){ 10 | testConnector = HcfsTestConnector.class.getCanonicalName(); 11 | } 12 | 13 | return getHcfsTestConnector(testConnector); 14 | } 15 | 16 | public static HcfsTestConnectorInterface getHcfsTestConnector(String hcfsName) throws RuntimeException{ 17 | try { 18 | return (HcfsTestConnectorInterface)Class.forName(hcfsName).newInstance(); 19 | } catch (Exception e) { 20 | throw new RuntimeException("Cannont instatiate HCFS File System from HCFS_FILE_SYSTEM env variable. Error:\n " + e); 21 | } 22 | 23 | } 24 | 25 | } 26 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/contract/cephfs/TestLocalFSContractSeek.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. 
The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.hadoop.fs.contract.cephfs; 20 | 21 | import org.apache.hadoop.conf.Configuration; 22 | import org.apache.hadoop.fs.contract.AbstractContractSeekTest; 23 | import org.apache.hadoop.fs.contract.AbstractFSContract; 24 | 25 | public class TestLocalFSContractSeek extends AbstractContractSeekTest { 26 | @Override 27 | protected AbstractFSContract createContract(Configuration conf) { 28 | return new CephFSContract(conf); 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/contract/cephfs/TestLocalFSContractConcat.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. 
You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.hadoop.fs.contract.cephfs; 20 | 21 | import org.apache.hadoop.conf.Configuration; 22 | import org.apache.hadoop.fs.contract.AbstractContractConcatTest; 23 | import org.apache.hadoop.fs.contract.AbstractFSContract; 24 | 25 | public class TestLocalFSContractConcat extends AbstractContractConcatTest { 26 | @Override 27 | protected AbstractFSContract createContract(Configuration conf) { 28 | return new CephFSContract(conf); 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/contract/cephfs/TestLocalFSContractOpen.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.hadoop.fs.contract.cephfs; 20 | 21 | import org.apache.hadoop.conf.Configuration; 22 | import org.apache.hadoop.fs.contract.AbstractContractOpenTest; 23 | import org.apache.hadoop.fs.contract.AbstractFSContract; 24 | 25 | public class TestLocalFSContractOpen extends AbstractContractOpenTest { 26 | 27 | @Override 28 | protected AbstractFSContract createContract(Configuration conf) { 29 | return new CephFSContract(conf); 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/contract/cephfs/TestLocalFSContractRename.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | package org.apache.hadoop.fs.contract.cephfs; 20 | 21 | import org.apache.hadoop.conf.Configuration; 22 | import org.apache.hadoop.fs.contract.AbstractContractRenameTest; 23 | import org.apache.hadoop.fs.contract.AbstractFSContract; 24 | 25 | public class TestLocalFSContractRename extends AbstractContractRenameTest { 26 | @Override 27 | protected AbstractFSContract createContract(Configuration conf) { 28 | return new CephFSContract(conf); 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/contract/cephfs/TestLocalFSContractAppend.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | package org.apache.hadoop.fs.contract.cephfs; 20 | 21 | import org.apache.hadoop.conf.Configuration; 22 | import org.apache.hadoop.fs.contract.AbstractContractAppendTest; 23 | import org.apache.hadoop.fs.contract.AbstractFSContract; 24 | 25 | public class TestLocalFSContractAppend extends AbstractContractAppendTest { 26 | 27 | @Override 28 | protected AbstractFSContract createContract(Configuration conf) { 29 | return new CephFSContract(conf); 30 | } 31 | 32 | } 33 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/contract/cephfs/TestLocalFSContractDelete.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | package org.apache.hadoop.fs.contract.cephfs; 20 | 21 | import org.apache.hadoop.conf.Configuration; 22 | import org.apache.hadoop.fs.contract.AbstractContractDeleteTest; 23 | import org.apache.hadoop.fs.contract.AbstractFSContract; 24 | 25 | public class TestLocalFSContractDelete extends AbstractContractDeleteTest { 26 | 27 | @Override 28 | protected AbstractFSContract createContract(Configuration conf) { 29 | return new CephFSContract(conf); 30 | } 31 | 32 | } 33 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | CephFS Hadoop Plugin! 2 | ===================== 3 | 4 | ## In a hurry ? ## 5 | 6 | - Install virtualbox and vagrant. 7 | - Make sure they are working correctly. 8 | 9 | Then just run: 10 | 11 | - cd ./resources/vagrant 12 | - vagrant up 13 | 14 | ## Wow ! How did you do that? Vagrant ## 15 | 16 | This repository contains the source code for the Hadoop FileSystem (HCFS) implementation on Ceph. 17 | 18 | In addition, for developers, it includes a Vagrant recipe for spinning up a Ceph 1 node cluster to test the plugin. 19 | 20 | The vagrant recipe 21 | 22 | - installs ceph-deploy, ceph, ceph-fuse, etc.. 23 | - installs the ceph java bindings 24 | - configures and sets up a single node cluster 25 | - creates a fuse mount in /mnt/ceph 26 | - installs maven 27 | - creates a shared directory for development (/ceph-hadoop) 28 | - creates a shared directory for vagrant setup (/vagrant) 29 | - installs custom HCFS jars for HADOOP-9361 30 | - finally runs the entire build, creates the jar, and runs unit tests. 31 | 32 | ## Learning the details ## 33 | 34 | To grok the details, just check out the Vagrantfile. In that file, we call 4 scripts (config.vm.provision). 35 | The java steps are summarized by the maven download and `mvn clean package` step. 
36 | 37 | ## Publishing , deployment , and continuous integration ## 38 | 39 | This is all TBD. For now, we manually publish this jar to maven central, see pom.xml for details. 40 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/contract/cephfs/TestLocalFSContractCreate.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | package org.apache.hadoop.fs.contract.cephfs; 20 | 21 | import org.apache.hadoop.conf.Configuration; 22 | import org.apache.hadoop.fs.contract.AbstractContractCreateTest; 23 | import org.apache.hadoop.fs.contract.AbstractFSContract; 24 | import org.junit.Test; 25 | 26 | public class TestLocalFSContractCreate extends AbstractContractCreateTest { 27 | @Override 28 | protected AbstractFSContract createContract(Configuration conf) { 29 | return new CephFSContract(conf); 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/contract/cephfs/TestLocalFSContractMkdir.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.hadoop.fs.contract.cephfs; 20 | 21 | import org.apache.hadoop.conf.Configuration; 22 | import org.apache.hadoop.fs.contract.AbstractContractMkdirTest; 23 | import org.apache.hadoop.fs.contract.AbstractFSContract; 24 | 25 | /** 26 | * Test dir operations on a the local FS. 
27 | */ 28 | public class TestLocalFSContractMkdir extends AbstractContractMkdirTest { 29 | @Override 30 | protected AbstractFSContract createContract(Configuration conf) { 31 | return new CephFSContract(conf); 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/contract/cephfs/TestLocalFSContractRootDirectory.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | package org.apache.hadoop.fs.contract.cephfs; 20 | 21 | import org.apache.hadoop.conf.Configuration; 22 | import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest; 23 | import org.apache.hadoop.fs.contract.AbstractFSContract; 24 | 25 | public class TestLocalFSContractRootDirectory extends AbstractContractRootDirectoryTest { 26 | @Override 27 | protected AbstractFSContract createContract(Configuration conf) { 28 | return new CephFSContract(conf); 29 | } 30 | 31 | @Override 32 | public void testRmEmptyRootDirNonRecursive() throws Throwable { 33 | System.out.println("skipped"); 34 | } 35 | 36 | @Override 37 | public void testRmRootRecursive() throws Throwable { 38 | System.out.println("skipped"); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /conf/core-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 16 | 17 | 18 | 19 | 20 | 21 | fs.default.name 22 | ceph://localhost:6789/ 23 | 24 | 25 | fs.defaultFS 26 | ceph://localhost:6789/ 27 | 28 | 29 | ceph.conf.file 30 | /etc/ceph/ceph.conf 31 | 32 | 42 | 43 | ceph.data.pools 44 | data 45 | 46 | 47 | fs.AbstractFileSystem.ceph.impl 48 | org.apache.hadoop.fs.ceph.CephFs 49 | 50 | 51 | fs.ceph.impl 52 | org.apache.hadoop.fs.ceph.CephFileSystem 53 | 54 | 55 | 56 | 57 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/contract/cephfs/TestLocalFSContractLoaded.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. 
The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.hadoop.fs.contract.cephfs; 20 | 21 | import org.apache.hadoop.conf.Configuration; 22 | import org.apache.hadoop.fs.contract.AbstractFSContract; 23 | import org.apache.hadoop.fs.contract.AbstractFSContractTestBase; 24 | import org.junit.Test; 25 | 26 | import java.net.URL; 27 | 28 | /** 29 | * just here to make sure that the local.xml resource is actually loading 30 | */ 31 | public class TestLocalFSContractLoaded extends AbstractFSContractTestBase { 32 | 33 | @Override 34 | protected AbstractFSContract createContract(Configuration conf) { 35 | return new CephFSContract(conf); 36 | } 37 | 38 | @Test 39 | public void testContractWorks() throws Throwable { 40 | String key = getContract().getConfKey(SUPPORTS_ATOMIC_RENAME); 41 | assertNotNull("not set: " + key, getContract().getConf().get(key)); 42 | assertTrue("not true: " + key, 43 | getContract().isSupported(SUPPORTS_ATOMIC_RENAME, false)); 44 | } 45 | 46 | @Test 47 | public void testContractResourceOnClasspath() throws Throwable { 48 | URL url = this.getClass() 49 | .getClassLoader() 50 | .getResource(CephFSContract.CONTRACT_XML); 51 | assertNotNull("could not find contract resource", url); 52 | } 53 | } -------------------------------------------------------------------------------- /resources/vagrant/ceph-fs-create.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | if [ $# -lt 1 ]; then 3 | DIR=/etc/ceph 4 | else 5 | DIR=$1 6 | fi 7 | 8 | # get rid of process and directories leftovers 9 | pkill ceph-fuse || true 10 | pkill ceph-mon || true 11 | pkill ceph-osd || true 12 | pkill ceph-mds || true 13 | rm -fr $DIR 14 | 15 | # cluster wide parameters 16 | mkdir -p ${DIR}/log 17 | cat > $DIR/ceph.conf << EOF 18 | [global] 19 | fsid = $(uuidgen) 20 | osd crush chooseleaf type = 0 21 | run dir = ${DIR}/run 22 | auth cluster required = none 23 | auth service required = none 24 | auth client required = none 25 | osd pool default size = 1 26 | EOF 27 | 28 | export CEPH_ARGS="--conf ${DIR}/ceph.conf" 29 | 30 | # single monitor 31 | MON_DATA=${DIR}/mon 32 | mkdir -p $MON_DATA 33 | 34 | cat >> $DIR/ceph.conf << EOF 35 | [mon.0] 36 | log file = ${DIR}/log/mon.log 37 | chdir = "" 38 | mon cluster log file = ${DIR}/log/mon-cluster.log 39 | mon data = ${MON_DATA} 40 | mon addr = 127.0.0.1 41 | EOF 42 | 43 | ceph-mon --id 0 --mkfs --keyring /dev/null 44 | touch ${MON_DATA}/keyring 45 | ceph-mon --id 0 46 | 47 | # single osd 48 | OSD_DATA=${DIR}/osd 49 | mkdir ${OSD_DATA} 50 | 51 | cat >> $DIR/ceph.conf << EOF 52 | [osd.0] 53 | log file = ${DIR}/log/osd.log 54 | chdir = "" 55 | osd data = ${OSD_DATA} 56 | osd journal = ${OSD_DATA}.journal 57 | osd journal size = 100 58 | EOF 59 | 60 | MDS_DATA=${DIR}/mds 61 | mkdir ${MDS_DATA} 62 | cat >> $DIR/ceph.conf << EOF 63 | [mds.0] 64 | log file = ${DIR}/log/mds.log 65 | chdir = "" 66 | host = localhost 67 | EOF 68 | 69 | OSD_ID=$(ceph osd create) 70 | ceph osd crush add osd.${OSD_ID} 1 root=default host=localhost 71 | ceph-osd --id ${OSD_ID} --mkjournal --mkfs 72 | ceph-osd --id ${OSD_ID} 73 | 74 | ceph-mds -m 127.0.0.1:6789 -i ${OSD_ID} 75 | 76 | # check that it works 77 | rados --pool data put group /etc/group 78 | rados --pool data get group ${DIR}/group 79 | diff /etc/group ${DIR}/group 80 | ceph osd 
tree 81 | 82 | mkdir -p /mnt/ceph 83 | ceph-fuse -m 127.0.0.1:6789 /mnt/ceph/ 84 | mount 85 | 86 | # display usage instructions 87 | echo export CEPH_ARGS="'--conf ${DIR}/ceph.conf'" 88 | echo ceph osd tree 89 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/ceph/CephConfigKeys.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | package org.apache.hadoop.fs.ceph; 19 | 20 | import org.apache.hadoop.fs.CommonConfigurationKeys; 21 | 22 | /** 23 | * Configuration key constants used by CephFileSystem. 
24 | */ 25 | public class CephConfigKeys extends CommonConfigurationKeys { 26 | public static final String CEPH_OBJECT_SIZE_KEY = "ceph.object.size"; 27 | public static final long CEPH_OBJECT_SIZE_DEFAULT = 64*1024*1024; 28 | 29 | public static final String CEPH_CONF_FILE_KEY = "ceph.conf.file"; 30 | public static final String CEPH_CONF_FILE_DEFAULT = null; 31 | 32 | public static final String CEPH_CONF_OPTS_KEY = "ceph.conf.options"; 33 | public static final String CEPH_CONF_OPTS_DEFAULT = null; 34 | 35 | public static final String CEPH_REPLICATION_KEY = "ceph.replication"; 36 | public static final short CEPH_REPLICATION_DEFAULT = 3; 37 | 38 | public static final String CEPH_ROOT_DIR_KEY = "ceph.root.dir"; 39 | public static final String CEPH_ROOT_DIR_DEFAULT = null; 40 | 41 | public static final String CEPH_LOCALIZE_READS_KEY = "ceph.localize.reads"; 42 | public static final boolean CEPH_LOCALIZE_READS_DEFAULT = true; 43 | 44 | public static final String CEPH_DATA_POOLS_KEY = "ceph.data.pools"; 45 | public static final String CEPH_DATA_POOLS_DEFAULT = null; 46 | 47 | public static final String CEPH_AUTH_ID_KEY = "ceph.auth.id"; 48 | public static final String CEPH_AUTH_ID_DEFAULT = null; 49 | 50 | public static final String CEPH_AUTH_KEYFILE_KEY = "ceph.auth.keyfile"; 51 | public static final String CEPH_AUTH_KEYFILE_DEFAULT = null; 52 | 53 | public static final String CEPH_AUTH_KEYRING_KEY = "ceph.auth.keyring"; 54 | public static final String CEPH_AUTH_KEYRING_DEFAULT = null; 55 | 56 | public static final String CEPH_MON_ADDR_KEY = "ceph.mon.address"; 57 | public static final String CEPH_MON_ADDR_DEFAULT = null; 58 | 59 | public static final String CEPH_PORT = "ceph.port"; 60 | public static final int CEPH_PORT_DEFAULT = 6789; 61 | } 62 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/test/unit/HcfsUmaskTest.java: 
-------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2014 RedHat 3 | * 4 | * Copyright (c) 2014 Gluster, Inc. 5 | * This file is part of GlusterFS. 6 | * 7 | * Licensed under the Apache License, Version 2.0 8 | * (the "License"); you may not use this file except in compliance with 9 | * the License. You may obtain a copy of the License at 10 | * 11 | * http://www.apache.org/licenses/LICENSE-2.0 12 | * 13 | * Unless required by applicable law or agreed to in writing, software 14 | * distributed under the License is distributed on an "AS IS" BASIS, 15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 16 | * implied. See the License for the specific language governing 17 | * permissions and limitations under the License. 18 | * 19 | */ 20 | 21 | package org.apache.hadoop.fs.test.unit; 22 | 23 | import static org.junit.Assert.assertEquals; 24 | import static org.junit.Assert.assertFalse; 25 | import static org.junit.Assert.assertTrue; 26 | 27 | import java.io.IOException; 28 | 29 | import org.apache.hadoop.fs.FSDataInputStream; 30 | import org.apache.hadoop.fs.FSDataOutputStream; 31 | import org.apache.hadoop.fs.FileStatus; 32 | import org.apache.hadoop.fs.FileSystem; 33 | import org.apache.hadoop.fs.Path; 34 | import org.apache.hadoop.conf.Configuration; 35 | import org.apache.hadoop.fs.contract.AbstractBondedFSContract; 36 | import org.apache.hadoop.fs.permission.FsAction; 37 | import org.apache.hadoop.fs.permission.FsPermission; 38 | import org.apache.hadoop.fs.test.connector.HcfsTestConnectorFactory; 39 | import org.apache.hadoop.fs.test.connector.HcfsTestConnectorInterface; 40 | import org.junit.After; 41 | import org.junit.AfterClass; 42 | import org.junit.Assert; 43 | import org.junit.BeforeClass; 44 | import org.junit.Test; 45 | 46 | /** 47 | * Unit test for HCFS classes. 
48 | * 49 | */ 50 | public class HcfsUmaskTest{ 51 | 52 | static FileSystem fs ; 53 | 54 | @BeforeClass 55 | public static void setup() throws Exception { 56 | HcfsTestConnectorInterface connector = HcfsTestConnectorFactory.getHcfsTestConnector(); 57 | fs= connector.create(); 58 | } 59 | 60 | @AfterClass 61 | public static void after() throws IOException{ 62 | fs.close(); 63 | } 64 | 65 | @org.junit.Test 66 | public void testMkdirsWithUmask() throws Exception { 67 | Configuration conf = fs.getConf(); 68 | String oldUmask = conf.get("fs.permissions.umask-mode"); 69 | Path dir = new Path("dirUmask022"); 70 | conf.set("fs.permissions.umask-mode", "022"); 71 | assertTrue(fs.mkdirs(dir)); 72 | conf.set("fs.permissions.umask-mode", oldUmask); 73 | FileStatus status = fs.getFileStatus(dir); 74 | assertTrue(status.isDirectory()); 75 | assertEquals((short)0755, status.getPermission().toShort()); 76 | 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /conf/localfs.xml: -------------------------------------------------------------------------------- 1 | 18 | 19 | 20 | 21 | 24 | 25 | 26 | fs.contract.is-case-sensitive 27 | true 28 | 29 | 30 | 31 | 32 | fs.contract.supports-unix-permissions 33 | true 34 | 35 | 36 | 39 | 40 | 41 | fs.contract.test.root-tests-enabled 42 | true 43 | 44 | 45 | 46 | fs.contract.test.random-seek-count 47 | 1000 48 | 49 | 50 | 51 | fs.contract.rename-creates-dest-dirs 52 | true 53 | 54 | 55 | 56 | fs.contract.rename-overwrites-dest 57 | true 58 | 59 | 60 | 61 | 64 | 65 | fs.contract.supports-append 66 | true 67 | 68 | 69 | 70 | fs.contract.supports-atomic-directory-delete 71 | true 72 | 73 | 74 | 75 | fs.contract.supports-atomic-rename 76 | true 77 | 78 | 79 | 80 | fs.contract.supports-block-locality 81 | true 82 | 83 | 84 | 85 | fs.contract.supports-concat 86 | false 87 | 88 | 89 | 90 | fs.contract.supports-seek 91 | true 92 | 93 | 94 | 95 | fs.contract.supports-seek-on-closed-file 96 | true 97 | 98 | 
99 | 100 | 101 | fs.contract.rejects-seek-past-eof 102 | true 103 | 104 | 105 | 106 | fs.contract.supports-strict-exceptions 107 | false 108 | 109 | 110 | 111 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/ceph/CephFsProto.java: -------------------------------------------------------------------------------- 1 | // -*- mode:Java; tab-width:2; c-basic-offset:2; indent-tabs-mode:t -*- 2 | 3 | /** 4 | * 5 | * Licensed under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 14 | * implied. See the License for the specific language governing 15 | * permissions and limitations under the License. 16 | * 17 | * 18 | * Abstract base class for communicating with a Ceph filesystem and its 19 | * C++ codebase from Java, or pretending to do so (for unit testing purposes). 20 | * As only the Ceph package should be using this directly, all methods 21 | * are protected. 
package org.apache.hadoop.fs.ceph;

import java.io.IOException;
import java.net.URI;
import java.net.InetAddress;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.Configuration;

import com.ceph.fs.CephStat;
import com.ceph.fs.CephPoolException;
import com.ceph.fs.CephStatVFS;
import com.ceph.crush.Bucket;
import com.ceph.fs.CephFileExtent;

/**
 * Abstract base class for communicating with a Ceph filesystem and its
 * C++ codebase from Java, or pretending to do so (for unit testing purposes).
 * As only the Ceph package should be using this directly, all methods
 * are package-private.
 */
abstract class CephFsProto {

  /** Connect to the Ceph cluster named by uri using settings from conf. */
  abstract void initialize(URI uri, Configuration conf) throws IOException;
  /** Raw open; returns a Ceph file descriptor. */
  abstract int __open(Path path, int flags, int mode) throws IOException;
  /** Open a file; returns a Ceph file descriptor. */
  abstract int open(Path path, int flags, int mode) throws IOException;
  /** Open a file with explicit striping/layout parameters. */
  abstract int open(Path path, int flags, int mode, int stripe_unit,
      int stripe_count, int object_size, String data_pool) throws IOException;
  /** Stat an open file descriptor. */
  abstract void fstat(int fd, CephStat stat) throws IOException;
  /** Stat a path without following symlinks. */
  abstract void lstat(Path path, CephStat stat) throws IOException;
  /** Filesystem-level statistics for the filesystem containing path. */
  abstract void statfs(Path path, CephStatVFS stat) throws IOException;
  /** Remove a file. */
  abstract void unlink(Path path) throws IOException;
  /** Remove an (empty) directory. */
  abstract void rmdir(Path path) throws IOException;
  /** List the entries of a directory. */
  abstract String[] listdir(Path path) throws IOException;
  /** Set file attributes selected by mask. */
  abstract void setattr(Path path, CephStat stat, int mask) throws IOException;
  /** Change file permission bits. */
  abstract void chmod(Path path, int mode) throws IOException;
  /** Reposition the file offset of an open descriptor. */
  abstract long lseek(int fd, long offset, int whence) throws IOException;
  /** Close an open file descriptor. */
  abstract void close(int fd) throws IOException;
  /** Unmount and shut down the connection to the cluster. */
  abstract void shutdown() throws IOException;
  /** Rename src to dst. */
  abstract void rename(Path src, Path dst) throws IOException;
  /** Default replication factor. */
  abstract short getDefaultReplication();
  /** Replication factor of an existing file. */
  abstract short get_file_replication(Path path) throws IOException;
  /** Write size bytes from buf at offset; returns bytes written. */
  abstract int write(int fd, byte[] buf, long size, long offset) throws IOException;
  /** Read up to size bytes into buf from offset; returns bytes read. */
  abstract int read(int fd, byte[] buf, long size, long offset) throws IOException;
  /** Create a directory and any missing parents. */
  abstract void mkdirs(Path path, int mode) throws IOException;
  /** Granularity constraint for stripe units. */
  abstract int get_stripe_unit_granularity();
  /** Name of the pool holding an open file's data. */
  abstract String get_file_pool_name(int fd);
  // BUG FIX: removed the stray second semicolon after this declaration.
  /** Numeric id of a named pool. */
  abstract int get_pool_id(String pool_name) throws IOException;
  /** Replication factor of a pool. */
  abstract int get_pool_replication(int poolid) throws IOException;
  /** Network address of an OSD. */
  abstract InetAddress get_osd_address(int osd) throws IOException;
  /** CRUSH location of an OSD. */
  abstract Bucket[] get_osd_crush_location(int osd) throws IOException;
  /** Extent mapping for part of an open file. */
  abstract CephFileExtent get_file_extent(int fd, long offset) throws IOException;
  /** Flush an open file's data to stable storage. */
  abstract void fsync(int fd) throws IOException;
}
27 | export JAVA_HOME=${JAVA_HOME} 28 | 29 | # The jsvc implementation to use. Jsvc is required to run secure datanodes. 30 | #export JSVC_HOME=${JSVC_HOME} 31 | 32 | export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"} 33 | 34 | # Extra Java CLASSPATH elements. Automatically insert capacity-scheduler. 35 | for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do 36 | if [ "$HADOOP_CLASSPATH" ]; then 37 | export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f 38 | else 39 | export HADOOP_CLASSPATH=$f 40 | fi 41 | done 42 | 43 | # The maximum amount of heap to use, in MB. Default is 1000. 44 | #export HADOOP_HEAPSIZE= 45 | #export HADOOP_NAMENODE_INIT_HEAPSIZE="" 46 | 47 | # Extra Java runtime options. Empty by default. 48 | export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true -Djava.library.path=/usr/local/lib/" 49 | 50 | # Command specific options appended to HADOOP_OPTS when specified 51 | export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS" 52 | export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS" 53 | 54 | export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS" 55 | 56 | # The following applies to multiple commands (fs, dfs, fsck, distcp etc) 57 | export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS" 58 | #HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS" 59 | 60 | # On secure datanodes, user to run the datanode as after dropping privileges 61 | export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER} 62 | 63 | # Where log files are stored. $HADOOP_HOME/logs by default. 64 | #export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER 65 | 66 | # Where log files are stored in the secure data environment. 
67 | export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER} 68 | 69 | # The directory where pid files are stored. /tmp by default. 70 | # NOTE: this should be set to a directory that can only be written to by 71 | # the user that will run the hadoop daemons. Otherwise there is the 72 | # potential for a symlink attack. 73 | export HADOOP_PID_DIR=${HADOOP_PID_DIR} 74 | export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR} 75 | 76 | # A string representing this instance of hadoop. $USER by default. 77 | export HADOOP_IDENT_STRING=$USER 78 | export LD_LIBRARY_PATH=/usr/local/lib/ 79 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/contract/cephfs/CephFSContract.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | package org.apache.hadoop.fs.contract.cephfs; 20 | 21 | import java.io.IOException; 22 | 23 | import org.apache.hadoop.conf.Configuration; 24 | import org.apache.hadoop.fs.FileSystem; 25 | import org.apache.hadoop.fs.Path; 26 | import org.apache.hadoop.fs.contract.AbstractFSContract; 27 | import org.apache.hadoop.fs.contract.ContractOptions; 28 | import org.apache.hadoop.fs.contract.ContractTestUtils; 29 | import org.apache.hadoop.fs.test.connector.HcfsTestConnector; 30 | import org.apache.hadoop.util.Shell; 31 | 32 | /** 33 | * The contract of the Local filesystem. 34 | * This changes its feature set from platform for platform -the default 35 | * set is updated during initialization. 36 | * 37 | * This contract contains some override points, to permit 38 | * the raw local filesystem and other filesystems to subclass it. 39 | */ 40 | public class CephFSContract extends AbstractFSContract { 41 | 42 | public static final String CONTRACT_XML = "localfs.xml"; 43 | public static final String SYSPROP_TEST_BUILD_DATA = "test.build.data"; 44 | public static final String DEFAULT_TEST_BUILD_DATA_DIR = "test/build/data"; 45 | private FileSystem fs; 46 | 47 | public CephFSContract(Configuration conf) { 48 | super(conf); 49 | //insert the base features 50 | addConfResource(getContractXml()); 51 | } 52 | 53 | /** 54 | * Return the contract file for this filesystem 55 | * @return the XML 56 | */ 57 | protected String getContractXml() { 58 | return CONTRACT_XML; 59 | } 60 | 61 | @Override 62 | public void init() throws IOException { 63 | super.init(); 64 | fs = new HcfsTestConnector().create(); 65 | adjustContractToLocalEnvironment(); 66 | } 67 | 68 | /** 69 | * tweak some of the contract parameters based on the local system 70 | * state 71 | */ 72 | protected void adjustContractToLocalEnvironment() { 73 | if (Shell.WINDOWS) { 74 | //NTFS doesn't do case sensitivity, and its permissions are ACL-based 75 | 
getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE), false); 76 | getConf().setBoolean(getConfKey(ContractOptions.SUPPORTS_UNIX_PERMISSIONS), false); 77 | } else if (ContractTestUtils.isOSX()) { 78 | //OSX HFS+ is not case sensitive 79 | getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE), 80 | false); 81 | } 82 | } 83 | 84 | @Override 85 | public String getScheme() { 86 | return "ceph"; 87 | } 88 | 89 | @Override 90 | public Path getTestPath() { 91 | Path path = fs.makeQualified(new Path( 92 | getTestDataDir())); 93 | return path; 94 | } 95 | 96 | /** 97 | * Get the test data directory 98 | * @return the directory for test data 99 | */ 100 | protected String getTestDataDir() { 101 | return System.getProperty(SYSPROP_TEST_BUILD_DATA, DEFAULT_TEST_BUILD_DATA_DIR); 102 | } 103 | 104 | @Override 105 | public FileSystem getTestFileSystem() throws IOException { 106 | return fs; 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /resources/vagrant/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # Vagrantfile API/syntax version. Don't touch unless you know what you're doing! 5 | VAGRANTFILE_API_VERSION = "2" 6 | 7 | Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| 8 | 9 | config.vm.provision "shell", path: "ceph-repo-install.sh" 10 | config.vm.provision "shell", path: "ceph-install.sh" 11 | config.vm.provision "shell", path: "ceph-fs-create.sh" 12 | config.vm.provision "shell", path: "dev-setup.sh" 13 | 14 | # All Vagrant configuration is done here. The most common configuration 15 | # options are documented and commented below. For a complete reference, 16 | # please see the online documentation at vagrantup.com. 17 | 18 | # Every Vagrant virtual environment requires a box to build off of. 19 | config.vm.box = "chef/centos-6.5" 20 | 21 | # Disable automatic box update checking. 
If you disable this, then 22 | # boxes will only be checked for updates when the user runs 23 | # `vagrant box outdated`. This is not recommended. 24 | # config.vm.box_check_update = false 25 | 26 | # Create a forwarded port mapping which allows access to a specific port 27 | # within the machine from a port on the host machine. In the example below, 28 | # accessing "localhost:8080" will access port 80 on the guest machine. 29 | # config.vm.network "forwarded_port", guest: 80, host: 8080 30 | 31 | # Create a private network, which allows host-only access to the machine 32 | # using a specific IP. 33 | # config.vm.network "private_network", ip: "192.168.33.10" 34 | 35 | # Create a public network, which generally matched to bridged network. 36 | # Bridged networks make the machine appear as another physical device on 37 | # your network. 38 | # config.vm.network "public_network" 39 | 40 | # If true, then any SSH connections made will enable agent forwarding. 41 | # Default value: false 42 | # config.ssh.forward_agent = true 43 | 44 | # Share an additional folder to the guest VM. The first argument is 45 | # the path on the host to the actual folder. The second argument is 46 | # the path on the guest to mount the folder. And the optional third 47 | # argument is a set of non-required options. 48 | config.vm.synced_folder "../../", "/ceph-hadoop" 49 | config.vm.synced_folder "./", "/vagrant" 50 | 51 | # Provider-specific configuration so you can fine-tune various 52 | # backing providers for Vagrant. These expose provider-specific options. 53 | # Example for VirtualBox: 54 | # 55 | config.vm.provider "virtualbox" do |vb| 56 | # # Don't boot with headless mode 57 | # vb.gui = true 58 | # 59 | # # Use VBoxManage to customize the VM. For example to change memory: 60 | vb.customize ["modifyvm", :id, "--memory", "3024"] 61 | end 62 | # 63 | # View the documentation for the provider you're using for more 64 | # information on available options. 
65 | 66 | # 67 | # You can also configure and bootstrap a client to an existing 68 | # policy server: 69 | # 70 | # config.vm.provision "cfengine" do |cf| 71 | # cf.policy_server_address = "10.0.2.15" 72 | # end 73 | 74 | # Enable provisioning with Puppet stand alone. Puppet manifests 75 | # are contained in a directory path relative to this Vagrantfile. 76 | # You will need to create the manifests directory and a manifest in 77 | # the file default.pp in the manifests_path directory. 78 | # 79 | # config.vm.provision "puppet" do |puppet| 80 | # puppet.manifests_path = "manifests" 81 | # puppet.manifest_file = "site.pp" 82 | # end 83 | 84 | # Enable provisioning with chef solo, specifying a cookbooks path, roles 85 | # path, and data_bags path (all relative to this Vagrantfile), and adding 86 | # some recipes and/or roles. 87 | # 88 | # config.vm.provision "chef_solo" do |chef| 89 | # chef.cookbooks_path = "../my-recipes/cookbooks" 90 | # chef.roles_path = "../my-recipes/roles" 91 | # chef.data_bags_path = "../my-recipes/data_bags" 92 | # chef.add_recipe "mysql" 93 | # chef.add_role "web" 94 | # 95 | # # You may also specify custom JSON attributes: 96 | # chef.json = { mysql_password: "foo" } 97 | # end 98 | 99 | # Enable provisioning with chef server, specifying the chef server URL, 100 | # and the path to the validation key (relative to this Vagrantfile). 101 | # 102 | # The Opscode Platform uses HTTPS. Substitute your organization for 103 | # ORGNAME in the URL and validation key. 104 | # 105 | # If you have your own Chef Server, use the appropriate URL, which may be 106 | # HTTP instead of HTTPS depending on your configuration. Also change the 107 | # validation key to validation.pem. 
108 | # 109 | # config.vm.provision "chef_client" do |chef| 110 | # chef.chef_server_url = "https://api.opscode.com/organizations/ORGNAME" 111 | # chef.validation_key_path = "ORGNAME-validator.pem" 112 | # end 113 | # 114 | # If you're using the Opscode platform, your validator client is 115 | # ORGNAME-validator, replacing ORGNAME with your organization name. 116 | # 117 | # If you have your own Chef Server, the default validation client name is 118 | # chef-validator, unless you changed the configuration. 119 | # 120 | # chef.validation_client_name = "ORGNAME-validator" 121 | end 122 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/ceph/CephOutputStream.java: -------------------------------------------------------------------------------- 1 | // -*- mode:Java; tab-width:2; c-basic-offset:2; indent-tabs-mode:t -*- 2 | 3 | /** 4 | * 5 | * Licensed under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 14 | * implied. See the License for the specific language governing 15 | * permissions and limitations under the License. 16 | * 17 | * 18 | * Implements the Hadoop FS interfaces to allow applications to store 19 | * files in Ceph. 
20 | */ 21 | 22 | package org.apache.hadoop.fs.ceph; 23 | 24 | 25 | import java.io.IOException; 26 | import java.io.OutputStream; 27 | 28 | import org.apache.commons.logging.Log; 29 | import org.apache.commons.logging.LogFactory; 30 | import org.apache.hadoop.conf.Configuration; 31 | import org.apache.hadoop.util.Progressable; 32 | 33 | import com.ceph.fs.CephMount; 34 | 35 | /** 36 | *

37 | * An {@link OutputStream} for a CephFileSystem and corresponding 38 | * Ceph instance. 39 | * 40 | * TODO: 41 | * - When libcephfs-jni supports ByteBuffer interface we can get rid of the 42 | * use of the buffer here to reduce memory copies and just use buffers in 43 | * libcephfs. Currently it might be useful to reduce JNI crossings, but not 44 | * much more. 45 | */ 46 | public class CephOutputStream extends OutputStream { 47 | private static final Log LOG = LogFactory.getLog(CephOutputStream.class); 48 | private boolean closed; 49 | 50 | private CephFsProto ceph; 51 | 52 | private int fileHandle; 53 | 54 | private byte[] buffer; 55 | private int bufUsed = 0; 56 | 57 | /** 58 | * Construct the CephOutputStream. 59 | * @param conf The FileSystem configuration. 60 | * @param fh The Ceph filehandle to connect to. 61 | */ 62 | public CephOutputStream(Configuration conf, CephFsProto cephfs, 63 | int fh, int bufferSize) { 64 | ceph = cephfs; 65 | fileHandle = fh; 66 | closed = false; 67 | buffer = new byte[1<<21]; 68 | } 69 | 70 | /** 71 | * Close the Ceph file handle if close() wasn't explicitly called. 72 | */ 73 | protected void finalize() throws Throwable { 74 | try { 75 | if (!closed) { 76 | close(); 77 | } 78 | } finally { 79 | super.finalize(); 80 | } 81 | } 82 | 83 | /** 84 | * Ensure that the stream is opened. 85 | */ 86 | private synchronized void checkOpen() throws IOException { 87 | if (closed) 88 | throw new IOException("operation on closed stream (fd=" + fileHandle + ")"); 89 | } 90 | 91 | /** 92 | * Get the current position in the file. 93 | * @return The file offset in bytes. 
94 | */ 95 | public synchronized long getPos() throws IOException { 96 | checkOpen(); 97 | return ceph.lseek(fileHandle, 0, CephMount.SEEK_CUR); 98 | } 99 | 100 | @Override 101 | public synchronized void write(int b) throws IOException { 102 | byte buf[] = new byte[1]; 103 | buf[0] = (byte) b; 104 | write(buf, 0, 1); 105 | } 106 | 107 | @Override 108 | public synchronized void write(byte buf[], int off, int len) throws IOException { 109 | checkOpen(); 110 | 111 | while (len > 0) { 112 | int remaining = Math.min(len, buffer.length - bufUsed); 113 | System.arraycopy(buf, off, buffer, bufUsed, remaining); 114 | 115 | bufUsed += remaining; 116 | off += remaining; 117 | len -= remaining; 118 | 119 | if (buffer.length == bufUsed) 120 | flushBuffer(); 121 | } 122 | } 123 | 124 | /* 125 | * Moves data from the buffer into libcephfs. 126 | */ 127 | private synchronized void flushBuffer() throws IOException { 128 | if (bufUsed == 0) 129 | return; 130 | 131 | while (bufUsed > 0) { 132 | int ret = ceph.write(fileHandle, buffer, bufUsed, -1); 133 | if (ret < 0) 134 | throw new IOException("ceph.write: ret=" + ret); 135 | 136 | if (ret == bufUsed) { 137 | bufUsed = 0; 138 | return; 139 | } 140 | 141 | assert(ret > 0); 142 | assert(ret < bufUsed); 143 | 144 | /* 145 | * TODO: handle a partial write by shifting the remainder of the data in 146 | * the buffer back to the beginning and retrying the write. It would 147 | * probably be better to use a ByteBuffer 'view' here, and I believe 148 | * using a ByteBuffer has some other performance benefits but we'll 149 | * likely need to update the libcephfs-jni implementation. 
150 | */ 151 | int remaining = bufUsed - ret; 152 | System.arraycopy(buffer, ret, buffer, 0, remaining); 153 | bufUsed -= ret; 154 | } 155 | 156 | assert(bufUsed == 0); 157 | } 158 | 159 | @Override 160 | public synchronized void flush() throws IOException { 161 | checkOpen(); 162 | flushBuffer(); // buffer -> libcephfs 163 | ceph.fsync(fileHandle); // libcephfs -> cluster 164 | } 165 | 166 | @Override 167 | public synchronized void close() throws IOException { 168 | checkOpen(); 169 | flush(); 170 | ceph.close(fileHandle); 171 | closed = true; 172 | } 173 | } 174 | -------------------------------------------------------------------------------- /pom.xml: -------------------------------------------------------------------------------- 1 | 3 | 4 | 4.0.0 5 | com.ceph.fs 6 | cephfs-hadoop 7 | jar 8 | 0.80.6 9 | cephfs-hadoop 10 | http://www.ceph.com 11 | CephFS for Hadoop. 12 | 13 | 14 | The Apache Software License, Version 2.0 15 | http://www.apache.org/licenses/LICENSE-2.0.txt 16 | 17 | 18 | 19 | 20 | Noah Watkins 21 | nwatkins@redhat.com 22 | Red Hat 23 | http://www.redhat.com 24 | 25 | 26 | Huamin Chen 27 | chenh@redhat.com 28 | Red Hat 29 | http://www.redhat.com 30 | 31 | 32 | Jay Vyas 33 | jvyas@redhat.com 34 | Red Hat 35 | http://www.redhat.com 36 | 37 | 38 | 39 | scm:git:git@github.com:ceph/cephfs-hadoop.git 40 | scm:git:git@github.com:ceph/cephfs-hadoop.git 41 | scm:git:git@github.com:ceph/cephfs-hadoop.git 42 | 43 | 44 | 45 | 46 | ossrh 47 | https://oss.sonatype.org/content/repositories/snapshots 48 | 49 | 50 | 51 | 52 | 2.4.0 53 | 0.80.5 54 | 55 | 56 | 57 | 58 | junit 59 | junit 60 | 4.9 61 | test 62 | 63 | 64 | 71 | 72 | com.rhbd.hcfs 73 | hadoop-common-latest-tests 74 | 0.1 75 | system 76 | ${project.basedir}/src/test/resources/hadoop-common-3.0.0-SNAPSHOT-tests.jar 77 | 78 | 79 | com.rhbd.hcfs 80 | hadoop-common-latest 81 | 0.1 82 | system 83 | ${project.basedir}/src/test/resources/hadoop-common-3.0.0-SNAPSHOT.jar 84 | 85 | 86 | 87 | 88 | 
org.apache.hadoop 89 | hadoop-common 90 | ${hadoop.version} 91 | 92 | 93 | 94 | com.ceph 95 | libcephfs 96 | ${libcephfs.version} 97 | 98 | 99 | 100 | 101 | 102 | 103 | conf 104 | 105 | 106 | 107 | 108 | org.apache.maven.plugins 109 | maven-compiler-plugin 110 | 3.1 111 | 112 | 113 | org.apache.maven.plugins 114 | maven-release-plugin 115 | 2.4.1 116 | 117 | 118 | org.apache.maven.plugins 119 | maven-source-plugin 120 | 121 | 122 | attach-sources 123 | 124 | jar 125 | 126 | 127 | 128 | 129 | 130 | org.apache.maven.plugins 131 | maven-javadoc-plugin 132 | 133 | 134 | attach-javadocs 135 | 136 | jar 137 | 138 | 139 | 140 | 141 | 142 | org.sonatype.plugins 143 | nexus-staging-maven-plugin 144 | 1.6.2 145 | true 146 | 147 | ossrh 148 | https://oss.sonatype.org/ 149 | true 150 | 151 | 152 | 153 | org.apache.maven.plugins 154 | maven-gpg-plugin 155 | 1.5 156 | 157 | 158 | sign-artifacts 159 | verify 160 | 161 | sign 162 | 163 | 164 | 165 | 166 | 167 | 168 | 169 | 170 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/ceph/CephInputStream.java: -------------------------------------------------------------------------------- 1 | // -*- mode:Java; tab-width:2; c-basic-offset:2; indent-tabs-mode:t -*- 2 | 3 | /** 4 | * 5 | * Licensed under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 14 | * implied. See the License for the specific language governing 15 | * permissions and limitations under the License. 16 | * 17 | * 18 | * Implements the Hadoop FS interfaces to allow applications to store 19 | * files in Ceph. 
20 | */ 21 | package org.apache.hadoop.fs.ceph; 22 | 23 | 24 | import java.io.IOException; 25 | 26 | import org.apache.commons.logging.Log; 27 | import org.apache.commons.logging.LogFactory; 28 | import org.apache.hadoop.conf.Configuration; 29 | import org.apache.hadoop.fs.FSInputStream; 30 | 31 | import com.ceph.fs.CephMount; 32 | 33 | /** 34 | *

35 | * An {@link FSInputStream} for a CephFileSystem and corresponding 36 | * Ceph instance. 37 | */ 38 | public class CephInputStream extends FSInputStream { 39 | private static final Log LOG = LogFactory.getLog(CephInputStream.class); 40 | private boolean closed; 41 | 42 | private int fileHandle; 43 | 44 | private long fileLength; 45 | 46 | private CephFsProto ceph; 47 | 48 | private byte[] buffer; 49 | private int bufPos = 0; 50 | private int bufValid = 0; 51 | private long cephPos = 0; 52 | 53 | /** 54 | * Create a new CephInputStream. 55 | * @param conf The system configuration. Unused. 56 | * @param fh The filehandle provided by Ceph to reference. 57 | * @param flength The current length of the file. If the length changes 58 | * you will need to close and re-open it to access the new data. 59 | */ 60 | public CephInputStream(Configuration conf, CephFsProto cephfs, 61 | int fh, long flength, int bufferSize) { 62 | // Whoever's calling the constructor is responsible for doing the actual ceph_open 63 | // call and providing the file handle. 64 | fileLength = flength; 65 | fileHandle = fh; 66 | closed = false; 67 | ceph = cephfs; 68 | buffer = new byte[1<<21]; 69 | LOG.debug( 70 | "CephInputStream constructor: initializing stream with fh " + fh 71 | + " and file length " + flength); 72 | 73 | } 74 | 75 | /** Ceph likes things to be closed before it shuts down, 76 | * so closing the IOStream stuff voluntarily in a finalizer is good 77 | */ 78 | protected void finalize() throws Throwable { 79 | try { 80 | if (!closed) { 81 | close(); 82 | } 83 | } finally { 84 | super.finalize(); 85 | } 86 | } 87 | 88 | private synchronized boolean fillBuffer() throws IOException { 89 | bufValid = ceph.read(fileHandle, buffer, buffer.length, -1); 90 | bufPos = 0; 91 | if (bufValid < 0) { 92 | int err = bufValid; 93 | 94 | bufValid = 0; 95 | // attempt to reset to old position. If it fails, too bad. 
96 | ceph.lseek(fileHandle, cephPos, CephMount.SEEK_SET); 97 | throw new IOException("Failed to fill read buffer! Error code:" + err); 98 | } 99 | cephPos += bufValid; 100 | return (bufValid != 0); 101 | } 102 | 103 | /* 104 | * Get the current position of the stream. 105 | */ 106 | public synchronized long getPos() throws IOException { 107 | return cephPos - bufValid + bufPos; 108 | } 109 | 110 | /** 111 | * Find the number of bytes remaining in the file. 112 | */ 113 | @Override 114 | public synchronized int available() throws IOException { 115 | if (closed) 116 | throw new IOException("file is closed"); 117 | return (int) (fileLength - getPos()); 118 | } 119 | 120 | public synchronized void seek(long targetPos) throws IOException { 121 | LOG.trace( 122 | "CephInputStream.seek: Seeking to position " + targetPos + " on fd " 123 | + fileHandle); 124 | if (targetPos > fileLength) { 125 | throw new IOException( 126 | "CephInputStream.seek: failed seek to position " + targetPos 127 | + " on fd " + fileHandle + ": Cannot seek after EOF " + fileLength); 128 | } 129 | long oldPos = cephPos; 130 | 131 | cephPos = ceph.lseek(fileHandle, targetPos, CephMount.SEEK_SET); 132 | bufValid = 0; 133 | bufPos = 0; 134 | if (cephPos < 0) { 135 | cephPos = oldPos; 136 | throw new IOException("Ceph failed to seek to new position!"); 137 | } 138 | } 139 | 140 | /** 141 | * Failovers are handled by the Ceph code at a very low level; 142 | * if there are issues that can be solved by changing sources 143 | * they'll be dealt with before anybody even tries to call this method! 144 | * @return false. 145 | */ 146 | public synchronized boolean seekToNewSource(long targetPos) { 147 | return false; 148 | } 149 | 150 | /** 151 | * Read a byte from the file. 152 | * @return the next byte. 
153 | */ 154 | @Override 155 | public synchronized int read() throws IOException { 156 | LOG.trace( 157 | "CephInputStream.read: Reading a single byte from fd " + fileHandle 158 | + " by calling general read function"); 159 | 160 | byte result[] = new byte[1]; 161 | 162 | if (getPos() >= fileLength) { 163 | return -1; 164 | } 165 | if (-1 == read(result, 0, 1)) { 166 | return -1; 167 | } 168 | if (result[0] < 0) { 169 | return 256 + (int) result[0]; 170 | } else { 171 | return result[0]; 172 | } 173 | } 174 | 175 | /** 176 | * Read a specified number of bytes from the file into a byte[]. 177 | * @param buf the byte array to read into. 178 | * @param off the offset to start at in the file 179 | * @param len the number of bytes to read 180 | * @return 0 if successful, otherwise an error code. 181 | * @throws IOException on bad input. 182 | */ 183 | @Override 184 | public synchronized int read(byte buf[], int off, int len) 185 | throws IOException { 186 | LOG.trace( 187 | "CephInputStream.read: Reading " + len + " bytes from fd " + fileHandle); 188 | 189 | if (closed) { 190 | throw new IOException( 191 | "CephInputStream.read: cannot read " + len + " bytes from fd " 192 | + fileHandle + ": stream closed"); 193 | } 194 | 195 | // ensure we're not past the end of the file 196 | if (getPos() >= fileLength) { 197 | LOG.debug( 198 | "CephInputStream.read: cannot read " + len + " bytes from fd " 199 | + fileHandle + ": current position is " + getPos() 200 | + " and file length is " + fileLength); 201 | 202 | return -1; 203 | } 204 | 205 | int totalRead = 0; 206 | int initialLen = len; 207 | int read; 208 | 209 | do { 210 | read = Math.min(len, bufValid - bufPos); 211 | try { 212 | System.arraycopy(buffer, bufPos, buf, off, read); 213 | } catch (IndexOutOfBoundsException ie) { 214 | throw new IOException( 215 | "CephInputStream.read: Indices out of bounds:" + "read length is " 216 | + len + ", buffer offset is " + off + ", and buffer size is " 217 | + buf.length); 218 | } 
catch (ArrayStoreException ae) { 219 | throw new IOException( 220 | "Uh-oh, CephInputStream failed to do an array" 221 | + "copy due to type mismatch..."); 222 | } catch (NullPointerException ne) { 223 | throw new IOException( 224 | "CephInputStream.read: cannot read " + len + "bytes from fd:" 225 | + fileHandle + ": buf is null"); 226 | } 227 | bufPos += read; 228 | len -= read; 229 | off += read; 230 | totalRead += read; 231 | } while (len > 0 && fillBuffer()); 232 | 233 | LOG.trace( 234 | "CephInputStream.read: Reading " + initialLen + " bytes from fd " 235 | + fileHandle + ": succeeded in reading " + totalRead + " bytes"); 236 | return totalRead; 237 | } 238 | 239 | /** 240 | * Close the CephInputStream and release the associated filehandle. 241 | */ 242 | @Override 243 | public void close() throws IOException { 244 | LOG.trace("CephOutputStream.close:enter"); 245 | if (!closed) { 246 | ceph.close(fileHandle); 247 | 248 | closed = true; 249 | LOG.trace("CephOutputStream.close:exit"); 250 | } 251 | } 252 | } 253 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/ceph/CephTalker.java: -------------------------------------------------------------------------------- 1 | // -*- mode:Java; tab-width:2; c-basic-offset:2; indent-tabs-mode:t -*- 2 | 3 | /** 4 | * 5 | * Licensed under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 14 | * implied. See the License for the specific language governing 15 | * permissions and limitations under the License. 
16 | * 17 | * 18 | * Wraps a number of native function calls to communicate with the Ceph 19 | * filesystem. 20 | */ 21 | package org.apache.hadoop.fs.ceph; 22 | 23 | import java.io.IOException; 24 | import java.net.URI; 25 | import java.io.FileNotFoundException; 26 | import java.util.Arrays; 27 | import java.net.InetAddress; 28 | 29 | import org.apache.hadoop.fs.Path; 30 | import org.apache.hadoop.conf.Configuration; 31 | import org.apache.commons.logging.Log; 32 | import org.apache.commons.lang.StringUtils; 33 | 34 | import com.ceph.fs.CephMount; 35 | import com.ceph.fs.CephStat; 36 | import com.ceph.fs.CephStatVFS; 37 | import com.ceph.fs.CephFileAlreadyExistsException; 38 | import com.ceph.fs.CephNotDirectoryException; 39 | import com.ceph.fs.CephPoolException; 40 | import com.ceph.crush.Bucket; 41 | import com.ceph.fs.CephFileExtent; 42 | 43 | class CephTalker extends CephFsProto { 44 | 45 | private CephMount mount; 46 | private short defaultReplication; 47 | 48 | public CephTalker(Configuration conf, Log log) { 49 | mount = null; 50 | } 51 | 52 | private String pathString(Path path) { 53 | if (null == path) { 54 | return "/"; 55 | } 56 | return path.toUri().getPath(); 57 | } 58 | 59 | void initialize(URI uri, Configuration conf) throws IOException { 60 | 61 | /* 62 | * Create mount with auth user id 63 | */ 64 | String user_id = conf.get( 65 | CephConfigKeys.CEPH_AUTH_ID_KEY, 66 | CephConfigKeys.CEPH_AUTH_ID_DEFAULT); 67 | mount = new CephMount(user_id); 68 | 69 | /* 70 | * Load a configuration file if specified 71 | */ 72 | String configfile = conf.get( 73 | CephConfigKeys.CEPH_CONF_FILE_KEY, 74 | CephConfigKeys.CEPH_CONF_FILE_DEFAULT); 75 | if (configfile != null) { 76 | mount.conf_read_file(configfile); 77 | } 78 | 79 | /* Set auth keyfile */ 80 | String keyfile = conf.get( 81 | CephConfigKeys.CEPH_AUTH_KEYFILE_KEY, 82 | CephConfigKeys.CEPH_AUTH_KEYFILE_DEFAULT); 83 | if (keyfile != null) 84 | mount.conf_set("keyfile", keyfile); 85 | 86 | /* Set auth 
keyring */ 87 | String keyring = conf.get( 88 | CephConfigKeys.CEPH_AUTH_KEYRING_KEY, 89 | CephConfigKeys.CEPH_AUTH_KEYRING_DEFAULT); 90 | if (keyring != null) 91 | mount.conf_set("keyring", keyring); 92 | 93 | /* Set monitor */ 94 | String mon_addr = null; 95 | String mon_host = uri.getHost(); 96 | int mon_port = uri.getPort(); 97 | if (mon_host != null && mon_port != -1) 98 | mon_addr = mon_host + ":" + mon_port; 99 | else { 100 | mon_addr = conf.get( 101 | CephConfigKeys.CEPH_MON_ADDR_KEY, 102 | CephConfigKeys.CEPH_MON_ADDR_DEFAULT); 103 | } 104 | if (mon_addr != null) 105 | mount.conf_set("mon_host", mon_addr); 106 | 107 | /* 108 | * Parse and set Ceph configuration options 109 | */ 110 | String configopts = conf.get( 111 | CephConfigKeys.CEPH_CONF_OPTS_KEY, 112 | CephConfigKeys.CEPH_CONF_OPTS_DEFAULT); 113 | if (configopts != null) { 114 | String[] options = configopts.split(","); 115 | for (String option : options) { 116 | String[] keyval = option.split("="); 117 | if (keyval.length != 2) { 118 | throw new IllegalArgumentException("Invalid Ceph option: " + option); 119 | } 120 | String key = keyval[0]; 121 | String val = keyval[1]; 122 | try { 123 | mount.conf_set(key, val); 124 | } catch (Exception e) { 125 | throw new IOException("Error setting Ceph option " + key + " = " + val); 126 | } 127 | } 128 | } 129 | 130 | /* 131 | * Get default replication from configuration. 132 | */ 133 | defaultReplication = (short)conf.getInt( 134 | CephConfigKeys.CEPH_REPLICATION_KEY, 135 | CephConfigKeys.CEPH_REPLICATION_DEFAULT); 136 | 137 | /* 138 | * Use a different root? 139 | */ 140 | String root = conf.get( 141 | CephConfigKeys.CEPH_ROOT_DIR_KEY, 142 | CephConfigKeys.CEPH_ROOT_DIR_DEFAULT); 143 | 144 | /* Actually mount the file system */ 145 | mount.mount(root); 146 | 147 | /* 148 | * Allow reads from replica objects? 
149 | */ 150 | boolean localizeReads = conf.getBoolean( 151 | CephConfigKeys.CEPH_LOCALIZE_READS_KEY, 152 | CephConfigKeys.CEPH_LOCALIZE_READS_DEFAULT); 153 | mount.localize_reads(localizeReads); 154 | 155 | mount.chdir("/"); 156 | } 157 | 158 | /* 159 | * Open a file. Allows directories to be opened. used internally to get the 160 | * pool name. Hadoop doesn't allow directories to be opened, and that is 161 | * handled below. 162 | */ 163 | int __open(Path path, int flags, int mode) throws IOException { 164 | return mount.open(pathString(path), flags, mode); 165 | } 166 | 167 | /* 168 | * Open a file. Ceph will not complain if we open a directory, but this 169 | * isn't something that Hadoop expects and we should throw an exception in 170 | * this case. 171 | */ 172 | int open(Path path, int flags, int mode) throws IOException { 173 | int fd = __open(path, flags, mode); 174 | CephStat stat = new CephStat(); 175 | fstat(fd, stat); 176 | if (stat.isDir()) { 177 | mount.close(fd); 178 | throw new FileNotFoundException(); 179 | } 180 | return fd; 181 | } 182 | 183 | /* 184 | * Same as open(path, flags, mode) alternative, but takes custom striping 185 | * parameters that are used when a file is being created. 
186 | */ 187 | int open(Path path, int flags, int mode, int stripe_unit, int stripe_count, 188 | int object_size, String data_pool) throws IOException { 189 | int fd = mount.open(pathString(path), flags, mode, stripe_unit, 190 | stripe_count, object_size, data_pool); 191 | CephStat stat = new CephStat(); 192 | fstat(fd, stat); 193 | if (stat.isDir()) { 194 | mount.close(fd); 195 | throw new FileNotFoundException(); 196 | } 197 | return fd; 198 | } 199 | 200 | 201 | void fstat(int fd, CephStat stat) throws IOException { 202 | mount.fstat(fd, stat); 203 | } 204 | 205 | void lstat(Path path, CephStat stat) throws IOException { 206 | try { 207 | mount.lstat(pathString(path), stat); 208 | } catch (CephNotDirectoryException e) { 209 | throw new FileNotFoundException(); 210 | } 211 | } 212 | 213 | void statfs(Path path, CephStatVFS stat) throws IOException { 214 | try { 215 | mount.statfs(pathString(path), stat); 216 | } catch (FileNotFoundException e) { 217 | throw new FileNotFoundException(); 218 | } 219 | 220 | } 221 | 222 | void rmdir(Path path) throws IOException { 223 | mount.rmdir(pathString(path)); 224 | } 225 | 226 | void unlink(Path path) throws IOException { 227 | mount.unlink(pathString(path)); 228 | } 229 | 230 | void rename(Path src, Path dst) throws IOException { 231 | mount.rename(pathString(src), pathString(dst)); 232 | } 233 | 234 | String[] listdir(Path path) throws IOException { 235 | CephStat stat = new CephStat(); 236 | try { 237 | mount.lstat(pathString(path), stat); 238 | } catch (FileNotFoundException e) { 239 | return null; 240 | } 241 | if (!stat.isDir()) 242 | return null; 243 | return mount.listdir(pathString(path)); 244 | } 245 | 246 | void mkdirs(Path path, int mode) throws IOException { 247 | mount.mkdirs(pathString(path), mode); 248 | } 249 | 250 | void close(int fd) throws IOException { 251 | mount.close(fd); 252 | } 253 | 254 | void chmod(Path path, int mode) throws IOException { 255 | mount.chmod(pathString(path), mode); 256 | } 257 | 
258 | void shutdown() throws IOException { 259 | if (null != mount) 260 | mount.unmount(); 261 | mount = null; 262 | } 263 | 264 | short getDefaultReplication() { 265 | return defaultReplication; 266 | } 267 | 268 | short get_file_replication(Path path) throws IOException { 269 | CephStat stat = new CephStat(); 270 | mount.lstat(pathString(path), stat); 271 | int replication = 1; 272 | if (stat.isFile()) { 273 | int fd; 274 | /* 275 | * When we stat files we also retrieve the file replication, but the 276 | * current libcephfs interface requires we open the file first. Since we 277 | * might not have read permissions, we try with write permissions. It 278 | * would be better to have a variant that operated on paths. 279 | */ 280 | try { 281 | fd = mount.open(pathString(path), CephMount.O_RDONLY, 0); 282 | } catch (IOException e) { 283 | fd = mount.open(pathString(path), CephMount.O_WRONLY, 0); 284 | } 285 | replication = mount.get_file_replication(fd); 286 | mount.close(fd); 287 | } 288 | return (short)replication; 289 | } 290 | 291 | int get_stripe_unit_granularity() { 292 | return mount.get_stripe_unit_granularity(); 293 | } 294 | 295 | void setattr(Path path, CephStat stat, int mask) throws IOException { 296 | mount.setattr(pathString(path), stat, mask); 297 | } 298 | 299 | void fsync(int fd) throws IOException { 300 | mount.fsync(fd, false); 301 | } 302 | 303 | long lseek(int fd, long offset, int whence) throws IOException { 304 | return mount.lseek(fd, offset, whence); 305 | } 306 | 307 | int write(int fd, byte[] buf, long size, long offset) throws IOException { 308 | return (int)mount.write(fd, buf, size, offset); 309 | } 310 | 311 | int read(int fd, byte[] buf, long size, long offset) throws IOException { 312 | return (int)mount.read(fd, buf, size, offset); 313 | } 314 | 315 | String get_file_pool_name(int fd) { 316 | return mount.get_file_pool_name(fd); 317 | } 318 | 319 | int get_pool_id(String pool_name) throws IOException { 320 | try { 321 | return 
mount.get_pool_id(pool_name); 322 | } catch (CephPoolException e) { 323 | throw new IOException(); 324 | } 325 | } 326 | 327 | int get_pool_replication(int poolid) throws IOException { 328 | try { 329 | return mount.get_pool_replication(poolid); 330 | } catch (CephPoolException e) { 331 | throw new IOException(); 332 | } 333 | } 334 | 335 | InetAddress get_osd_address(int osd) throws IOException { 336 | return mount.get_osd_address(osd); 337 | } 338 | 339 | Bucket[] get_osd_crush_location(int osd) throws IOException { 340 | return mount.get_osd_crush_location(osd); 341 | } 342 | 343 | CephFileExtent get_file_extent(int fd, long offset) throws IOException { 344 | return mount.get_file_extent(fd, offset); 345 | } 346 | } 347 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/test/unit/HcfsFileSystemTest.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2014 RedHat 3 | * 4 | * Copyright (c) 2011 Gluster, Inc. 5 | * This file is part of GlusterFS. 6 | * 7 | * Licensed under the Apache License, Version 2.0 8 | * (the "License"); you may not use this file except in compliance with 9 | * the License. You may obtain a copy of the License at 10 | * 11 | * http://www.apache.org/licenses/LICENSE-2.0 12 | * 13 | * Unless required by applicable law or agreed to in writing, software 14 | * distributed under the License is distributed on an "AS IS" BASIS, 15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 16 | * implied. See the License for the specific language governing 17 | * permissions and limitations under the License. 18 | * 19 | * 20 | * Base test class for GlusterFS + hadoop testing. 21 | * Requires existing/working gluster volume named "hadoop-gluster". 
22 | * 23 | * The default volume name can be overridden with env variable gluster-volume 24 | * 25 | */ 26 | 27 | package org.apache.hadoop.fs.test.unit; 28 | 29 | import static org.junit.Assert.assertEquals; 30 | import static org.junit.Assert.assertFalse; 31 | import static org.junit.Assert.assertTrue; 32 | 33 | import java.io.IOException; 34 | import java.util.Iterator; 35 | 36 | import org.apache.hadoop.conf.Configuration; 37 | import org.apache.hadoop.fs.FSDataInputStream; 38 | import org.apache.hadoop.fs.FSDataOutputStream; 39 | import org.apache.hadoop.fs.FileStatus; 40 | import org.apache.hadoop.fs.FileSystem; 41 | import org.apache.hadoop.fs.LocatedFileStatus; 42 | import org.apache.hadoop.fs.Path; 43 | import org.apache.hadoop.fs.RemoteIterator; 44 | import org.apache.hadoop.fs.permission.FsAction; 45 | import org.apache.hadoop.fs.permission.FsPermission; 46 | import org.apache.hadoop.fs.test.connector.HcfsTestConnectorFactory; 47 | import org.apache.hadoop.fs.test.connector.HcfsTestConnectorInterface; 48 | import org.junit.After; 49 | import org.junit.AfterClass; 50 | import org.junit.Assert; 51 | import org.junit.BeforeClass; 52 | import org.junit.Test; 53 | 54 | /** 55 | * Unit test for HCFS classes. 56 | * 57 | */ 58 | public class HcfsFileSystemTest{ 59 | 60 | static FileSystem fs ; 61 | 62 | 63 | /** 64 | * See MAPREDUCE-5902 for context on why this test is critical 65 | * for ecosystem interoperability. 
66 | */ 67 | @org.junit.Test 68 | public void testEncodedPaths() throws Exception { 69 | //FileSystem fs2 = FileSystem.getLocal(new Configuration()); 70 | FileSystem fs2 = fs; 71 | Path encodedFiles=new Path("/tmp/encodedTest"+System.currentTimeMillis()); 72 | fs2.mkdirs(encodedFiles); 73 | fs2.create(new Path(encodedFiles,"a")); 74 | fs2.create(new Path(encodedFiles,"a%2")); 75 | fs2.create(new Path(encodedFiles,"a%2a")); 76 | fs2.create(new Path(encodedFiles,"a%3a")); 77 | fs2.create(new Path(encodedFiles,"a%4a")); 78 | Assert.assertEquals(5, fs2.listStatus(encodedFiles).length); 79 | fs2.delete(encodedFiles, true); 80 | } 81 | 82 | @BeforeClass 83 | public static void setup() throws Exception { 84 | HcfsTestConnectorInterface connector = HcfsTestConnectorFactory.getHcfsTestConnector(); 85 | fs= connector.create(); 86 | } 87 | 88 | @AfterClass 89 | public static void after() throws IOException{ 90 | fs.close(); 91 | } 92 | 93 | 94 | @org.junit.Test 95 | public void testTolerantMkdirs() throws Exception{ 96 | Path longPath=new Path("a/b/c/d"); 97 | assertFalse(fs.exists(longPath)); 98 | fs.mkdirs(longPath); 99 | assertTrue(fs.exists(longPath)); 100 | fs.mkdirs(new Path("a")); 101 | assertTrue(fs.exists(longPath)); 102 | assertTrue(fs.exists(new Path("a"))); 103 | fs.mkdirs(new Path("a/b")); 104 | assertTrue(fs.exists(longPath)); 105 | assertTrue(fs.exists(new Path("a/b"))); 106 | fs.mkdirs(new Path("a/b/c")); 107 | assertTrue(fs.exists(longPath)); 108 | assertTrue(fs.exists(new Path("a/b/c"))); 109 | 110 | /* delete the directories */ 111 | 112 | fs.delete(new Path("a"), true); 113 | assertFalse(fs.exists(longPath)); 114 | 115 | } 116 | 117 | /** 118 | * BZ908898 : Test that confirms that ownership is preserved in gluster 119 | * FileStatus. 
120 | */ 121 | @org.junit.Test 122 | public void testOwner() throws Exception{ 123 | final String me=System.getProperties().getProperty("user.name"); 124 | Path myFile=new Path("to_owned_by_me.txt"); 125 | fs.create(myFile); 126 | Assert.assertEquals(fs.getFileStatus(myFile).getOwner(), me); 127 | fs.delete(myFile); 128 | } 129 | 130 | @org.junit.Test 131 | public void testTextWriteAndRead() throws Exception{ 132 | 133 | String testString="Is there anyone out there?"; 134 | String readChars=null; 135 | 136 | FSDataOutputStream dfsOut=null; 137 | dfsOut=fs.create(new Path("test1.txt")); 138 | dfsOut.writeUTF(testString); 139 | dfsOut.close(); 140 | 141 | FSDataInputStream dfsin=null; 142 | 143 | dfsin=fs.open(new Path("test1.txt")); 144 | readChars=dfsin.readUTF(); 145 | dfsin.close(); 146 | 147 | assertEquals(testString, readChars); 148 | 149 | fs.delete(new Path("test1.txt"), true); 150 | 151 | assertFalse(fs.exists(new Path("test1"))); 152 | } 153 | 154 | @org.junit.Test 155 | public void testPermissions() throws Exception{ 156 | 157 | Path myFile=new Path("filePerm.txt"); 158 | fs.create(myFile); 159 | short perm=0777; 160 | fs.setPermission(myFile, new FsPermission(perm)); 161 | assertEquals(fs.getFileStatus(myFile).getPermission().toShort(), perm); 162 | 163 | perm=0700; 164 | fs.setPermission(myFile, new FsPermission(perm)); 165 | assertEquals(fs.getFileStatus(myFile).getPermission().toShort(), perm); 166 | 167 | fs.delete(myFile); 168 | assertFalse(fs.exists(myFile)); 169 | 170 | /* directory permissions */ 171 | Path directory = new Path("aa/bb/cc"); 172 | perm = 0700; 173 | fs.mkdirs(directory, new FsPermission(perm)); 174 | assertEquals(fs.getFileStatus(directory).getPermission().toShort(), perm); 175 | fs.delete(new Path("aa"),true); 176 | assertFalse(fs.exists(directory)); 177 | 178 | 179 | perm = 0777; 180 | fs.mkdirs(directory, new FsPermission(perm)); 181 | assertEquals(fs.getFileStatus(directory).getPermission().toShort(), perm); 182 | fs.delete(new 
Path("aa"),true); 183 | assertFalse(fs.exists(directory)); 184 | } 185 | 186 | @org.junit.Test 187 | public void testZDirs() throws Exception{ 188 | final Path subDir1=new Path("td_dir.1"); 189 | final Path baseDir=new Path("td_testDirs1"); 190 | final Path test1=new Path("td_test1"); 191 | final Path test2=new Path("td_test/dir.2"); 192 | 193 | assertFalse(fs.exists(baseDir)); 194 | assertFalse(fs.isDirectory(baseDir)); 195 | 196 | // make the dir 197 | fs.mkdirs(baseDir); 198 | 199 | assertTrue(fs.isDirectory(baseDir)); 200 | // fs.setWorkingDirectory(baseDir); 201 | 202 | fs.mkdirs(subDir1); 203 | 204 | assertTrue(fs.isDirectory(subDir1)); 205 | 206 | assertFalse(fs.exists(test1)); 207 | 208 | assertFalse(fs.isDirectory(test2)); 209 | 210 | fs.create(new Path(baseDir, "dummyfile")); 211 | FileStatus[] p=fs.listStatus(baseDir); 212 | assertEquals(p.length, 1); 213 | 214 | fs.delete(baseDir, true); 215 | assertFalse(fs.exists(baseDir)); 216 | 217 | fs.delete(subDir1, true); 218 | assertFalse(fs.exists(subDir1)); 219 | 220 | 221 | fs.delete(baseDir); 222 | fs.delete(test1); 223 | fs.delete(test2); 224 | } 225 | 226 | @org.junit.Test 227 | public void testFiles() throws Exception{ 228 | 229 | Path subDir1=new Path("tf_dir.1"); 230 | Path baseDir=new Path("tf_testDirs1"); 231 | Path file1=new Path("tf_dir.1/foo.1"); 232 | Path file2=new Path("tf_dir.1/foo.2"); 233 | 234 | fs.mkdirs(baseDir); 235 | assertTrue(fs.isDirectory(baseDir)); 236 | // fs.setWorkingDirectory(baseDir); 237 | 238 | fs.mkdirs(subDir1); 239 | 240 | FSDataOutputStream s1=fs.create(file1, true, 4096, (short) 1, (long) 4096, null); 241 | FSDataOutputStream s2=fs.create(file2, true, 4096, (short) 1, (long) 4096, null); 242 | 243 | s1.close(); 244 | s2.close(); 245 | 246 | FileStatus[] p=fs.listStatus(subDir1); 247 | assertEquals(p.length, 2); 248 | 249 | fs.delete(file1, true); 250 | p=fs.listStatus(subDir1); 251 | assertEquals(p.length, 1); 252 | 253 | fs.delete(file2, true); 254 | 
p=fs.listStatus(subDir1); 255 | assertEquals(p.length, 0); 256 | 257 | fs.delete(baseDir, true); 258 | assertFalse(fs.exists(baseDir)); 259 | 260 | fs.delete(subDir1); 261 | fs.delete(file1); 262 | fs.delete(file2); 263 | } 264 | 265 | public void testFileIO() throws Exception{ 266 | 267 | Path subDir1=new Path("tfio_dir.1"); 268 | Path file1=new Path("tfio_dir.1/foo.1"); 269 | Path baseDir=new Path("tfio_testDirs1"); 270 | 271 | fs.mkdirs(baseDir); 272 | assertTrue(fs.isDirectory(baseDir)); 273 | // fs.setWorkingDirectory(baseDir); 274 | 275 | fs.mkdirs(subDir1); 276 | 277 | FSDataOutputStream s1=fs.create(file1, true, 4096, (short) 1, (long) 4096, null); 278 | 279 | int bufsz=4096; 280 | byte[] data=new byte[bufsz]; 281 | 282 | for(int i=0;i datapools = null; 74 | 75 | /** 76 | * Create a new CephFileSystem. 77 | */ 78 | public CephFileSystem() { 79 | } 80 | 81 | /** 82 | * Create a new CephFileSystem. 83 | */ 84 | public CephFileSystem(Configuration conf) { 85 | setConf(conf); 86 | } 87 | 88 | /** 89 | * Create an absolute path using the working directory. 90 | */ 91 | private Path makeAbsolute(Path path) { 92 | if (path.isAbsolute()) { 93 | return path; 94 | } 95 | return new Path(workingDir, path); 96 | } 97 | 98 | public URI getUri() { 99 | return uri; 100 | } 101 | 102 | @Override 103 | public void initialize(URI uri, Configuration conf) throws IOException { 104 | super.initialize(uri, conf); 105 | if (ceph == null) { 106 | ceph = new CephTalker(conf, LOG); 107 | } 108 | ceph.initialize(uri, conf); 109 | setConf(conf); 110 | this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority()); 111 | this.workingDir = getHomeDirectory(); 112 | } 113 | 114 | /** 115 | * Open a Ceph file and attach the file handle to an FSDataInputStream. 116 | * @param path The file to open 117 | * @param bufferSize Ceph does internal buffering; but you can buffer in 118 | * the Java code too if you like. 119 | * @return FSDataInputStream reading from the given path. 
120 | * @throws IOException if the path DNE or is a 121 | * directory, or there is an error getting data to set up the FSDataInputStream. 122 | */ 123 | public FSDataInputStream open(Path path, int bufferSize) throws IOException { 124 | path = makeAbsolute(path); 125 | 126 | // throws filenotfoundexception if path is a directory 127 | int fd = ceph.open(path, CephMount.O_RDONLY, 0); 128 | 129 | /* get file size */ 130 | CephStat stat = new CephStat(); 131 | ceph.fstat(fd, stat); 132 | 133 | CephInputStream istream = new CephInputStream(getConf(), ceph, fd, 134 | stat.size, bufferSize); 135 | return new FSDataInputStream(istream); 136 | } 137 | 138 | /** 139 | * Close down the CephFileSystem. Runs the base-class close method 140 | * and then kills the Ceph client itself. 141 | */ 142 | @Override 143 | public void close() throws IOException { 144 | super.close(); // this method does stuff, make sure it's run! 145 | ceph.shutdown(); 146 | } 147 | 148 | /** 149 | * Get an FSDataOutputStream to append onto a file. 150 | * @param path The File you want to append onto 151 | * @param bufferSize Ceph does internal buffering but you can buffer in the Java code as well if you like. 152 | * @param progress The Progressable to report progress to. 153 | * Reporting is limited but exists. 154 | * @return An FSDataOutputStream that connects to the file on Ceph. 155 | * @throws IOException If the file cannot be found or appended to. 
156 | */ 157 | public FSDataOutputStream append(Path path, int bufferSize, 158 | Progressable progress) throws IOException { 159 | path = makeAbsolute(path); 160 | 161 | if (progress != null) { 162 | progress.progress(); 163 | } 164 | 165 | int fd = ceph.open(path, CephMount.O_WRONLY|CephMount.O_APPEND, 0); 166 | 167 | if (progress != null) { 168 | progress.progress(); 169 | } 170 | 171 | CephOutputStream ostream = new CephOutputStream(getConf(), ceph, fd, 172 | bufferSize); 173 | return new FSDataOutputStream(ostream, statistics); 174 | } 175 | 176 | public Path getWorkingDirectory() { 177 | return workingDir; 178 | } 179 | 180 | @Override 181 | public void setWorkingDirectory(Path dir) { 182 | workingDir = makeAbsolute(dir); 183 | } 184 | 185 | /** 186 | * Create a directory and any nonexistent parents. Any portion 187 | * of the directory tree can exist without error. 188 | * @param path The directory path to create 189 | * @param perms The permissions to apply to the created directories. 190 | * @return true if successful, false otherwise 191 | * @throws IOException if the path is a child of a file. 192 | */ 193 | @Override 194 | public boolean mkdirs(Path path, FsPermission perms) throws IOException { 195 | path = makeAbsolute(path); 196 | 197 | boolean result = false; 198 | try { 199 | ceph.mkdirs(path, (int) perms.toShort()); 200 | result = true; 201 | } catch (CephFileAlreadyExistsException e) { 202 | result = true; 203 | } 204 | 205 | return result; 206 | } 207 | 208 | /** 209 | * Create a directory and any nonexistent parents. Any portion 210 | * of the directory tree can exist without error. 211 | * Apply umask from conf 212 | * @param f The directory path to create 213 | * @return true if successful, false otherwise 214 | * @throws IOException if the path is a child of a file. 
215 | */ 216 | @Override 217 | public boolean mkdirs(Path f) throws IOException { 218 | return mkdirs(f, FsPermission.getDirDefault().applyUMask(FsPermission.getUMask(getConf()))); 219 | } 220 | 221 | /** 222 | * Get stat information on a file. This does not fill owner or group, as 223 | * Ceph's support for these is a bit different than HDFS'. 224 | * @param path The path to stat. 225 | * @return FileStatus object containing the stat information. 226 | * @throws FileNotFoundException if the path could not be resolved. 227 | */ 228 | public FileStatus getFileStatus(Path path) throws IOException { 229 | path = makeAbsolute(path); 230 | 231 | CephStat stat = new CephStat(); 232 | ceph.lstat(path, stat); 233 | 234 | FileStatus status = new FileStatus(stat.size, stat.isDir(), 235 | ceph.get_file_replication(path), stat.blksize, stat.m_time, 236 | stat.a_time, new FsPermission((short) stat.mode), 237 | System.getProperty("user.name"), null, path.makeQualified(this)); 238 | 239 | return status; 240 | } 241 | 242 | /** 243 | * Get the FileStatus for each listing in a directory. 244 | * @param path The directory to get listings from. 245 | * @return FileStatus[] containing one FileStatus for each directory listing; 246 | * null if path does not exist. 
247 | */ 248 | public FileStatus[] listStatus(Path path) throws IOException { 249 | path = makeAbsolute(path); 250 | 251 | if (isFile(path)) 252 | return new FileStatus[] { getFileStatus(path) }; 253 | 254 | String[] dirlist = ceph.listdir(path); 255 | if (dirlist != null) { 256 | FileStatus[] status = new FileStatus[dirlist.length]; 257 | for (int i = 0; i < status.length; i++) { 258 | status[i] = getFileStatus(new Path(path, dirlist[i])); 259 | } 260 | return status; 261 | } 262 | else { 263 | throw new FileNotFoundException("File " + path + " does not exist."); 264 | } 265 | } 266 | 267 | @Override 268 | public void setPermission(Path path, FsPermission permission) throws IOException { 269 | path = makeAbsolute(path); 270 | ceph.chmod(path, permission.toShort()); 271 | } 272 | 273 | @Override 274 | public void setTimes(Path path, long mtime, long atime) throws IOException { 275 | path = makeAbsolute(path); 276 | 277 | CephStat stat = new CephStat(); 278 | int mask = 0; 279 | 280 | if (mtime != -1) { 281 | mask |= CephMount.SETATTR_MTIME; 282 | stat.m_time = mtime; 283 | } 284 | 285 | if (atime != -1) { 286 | mask |= CephMount.SETATTR_ATIME; 287 | stat.a_time = atime; 288 | } 289 | 290 | ceph.setattr(path, stat, mask); 291 | } 292 | 293 | /** 294 | * Get data pools from configuration. 295 | * 296 | * Package-private: used by unit tests 297 | */ 298 | String[] getConfiguredDataPools() { 299 | String pool_list = getConf().get( 300 | CephConfigKeys.CEPH_DATA_POOLS_KEY, 301 | CephConfigKeys.CEPH_DATA_POOLS_DEFAULT); 302 | 303 | if (pool_list != null) 304 | return pool_list.split(","); 305 | 306 | return new String[0]; 307 | } 308 | 309 | /** 310 | * Lookup pool size by name. 
311 | * 312 | * Package-private: used by unit tests 313 | */ 314 | int getPoolReplication(String pool_name) throws IOException { 315 | int pool_id = ceph.get_pool_id(pool_name); 316 | return ceph.get_pool_replication(pool_id); 317 | } 318 | 319 | /** 320 | * Select a data pool given the requested replication factor. 321 | */ 322 | private String selectDataPool(Path path, int repl_wanted) throws IOException { 323 | /* map pool size -> pool name */ 324 | TreeMap pools = new TreeMap(); 325 | 326 | /* 327 | * Start with a mapping for the default pool. An error here would indicate 328 | * something bad, so we throw any exceptions. For configured pools we 329 | * ignore some errors. 330 | */ 331 | int fd = ceph.__open(new Path("/"), CephMount.O_RDONLY, 0); 332 | String pool_name = ceph.get_file_pool_name(fd); 333 | ceph.close(fd); 334 | int replication = getPoolReplication(pool_name); 335 | pools.put(new Integer(replication), pool_name); 336 | 337 | /* 338 | * Insert extra data pools from configuration. Errors are logged (most 339 | * likely a non-existant pool), and a configured pool will override the 340 | * default pool. 341 | */ 342 | String[] conf_pools = getConfiguredDataPools(); 343 | for (String name : conf_pools) { 344 | try { 345 | replication = getPoolReplication(name); 346 | pools.put(new Integer(replication), name); 347 | } catch (IOException e) { 348 | LOG.warn("Error looking up replication of pool: " + name + ", " + e); 349 | } 350 | } 351 | 352 | /* Choose smallest entry >= target, or largest in map. 
*/ 353 | Map.Entry entry = pools.ceilingEntry(new Integer(repl_wanted)); 354 | if (entry == null) 355 | entry = pools.lastEntry(); 356 | 357 | /* should always contain default pool */ 358 | assert(entry != null); 359 | 360 | replication = entry.getKey().intValue(); 361 | pool_name = entry.getValue(); 362 | 363 | /* log non-exact match cases */ 364 | if (replication != repl_wanted) { 365 | LOG.info("selectDataPool path=" + path + " pool:repl=" + 366 | pool_name + ":" + replication + " wanted=" + repl_wanted); 367 | } 368 | 369 | return pool_name; 370 | } 371 | 372 | /** 373 | * Create a new file and open an FSDataOutputStream that's connected to it. 374 | * @param path The file to create. 375 | * @param permission The permissions to apply to the file. 376 | * @param overwrite If true, overwrite any existing file with 377 | * this name; otherwise don't. 378 | * @param bufferSize Ceph does internal buffering, but you can buffer 379 | * in the Java code too if you like. 380 | * @param replication Replication factor. See documentation on the 381 | * "ceph.data.pools" configuration option. 382 | * @param blockSize Ignored by Ceph. You can set client-wide block sizes 383 | * via the fs.ceph.blockSize param if you like. 384 | * @param progress A Progressable to report back to. 385 | * Reporting is limited but exists. 386 | * @return An FSDataOutputStream pointing to the created file. 387 | * @throws IOException if the path is an 388 | * existing directory, or the path exists but overwrite is false, or there is a 389 | * failure in attempting to open for append with Ceph. 
390 | */ 391 | public FSDataOutputStream create(Path path, FsPermission permission, 392 | boolean overwrite, int bufferSize, short replication, long blockSize, 393 | Progressable progress) throws IOException { 394 | 395 | path = makeAbsolute(path); 396 | 397 | boolean exists = exists(path); 398 | 399 | if (progress != null) { 400 | progress.progress(); 401 | } 402 | 403 | int flags = CephMount.O_WRONLY | CephMount.O_CREAT; 404 | 405 | if (exists) { 406 | if (overwrite) 407 | flags |= CephMount.O_TRUNC; 408 | else 409 | throw new FileAlreadyExistsException(); 410 | } else { 411 | Path parent = path.getParent(); 412 | if (parent != null) 413 | if (!mkdirs(parent)) 414 | throw new IOException("mkdirs failed for " + parent.toString()); 415 | } 416 | 417 | if (progress != null) { 418 | progress.progress(); 419 | } 420 | 421 | /* Sanity check. Ceph interface uses int for striping strategy */ 422 | if (blockSize > Integer.MAX_VALUE) { 423 | blockSize = Integer.MAX_VALUE; 424 | LOG.info("blockSize too large. Rounding down to " + blockSize); 425 | } 426 | 427 | /* 428 | * If blockSize <= 0 then we complain. We need to explicitly check for the 429 | * < 0 case (as opposed to allowing Ceph to raise an exception) because 430 | * the ceph_open_layout interface accepts -1 to request Ceph-specific 431 | * defaults. 432 | */ 433 | if (blockSize <= 0) 434 | throw new IllegalArgumentException("Invalid block size: " + blockSize); 435 | 436 | /* 437 | * Ceph may impose alignment restrictions on file layout. In this case we 438 | * check if the requested block size is aligned to the granularity of a 439 | * stripe unit used in the file system. When the block size is not aligned 440 | * we automatically adjust to the next largest multiple of stripe unit 441 | * granularity. 
442 | */ 443 | int su = ceph.get_stripe_unit_granularity(); 444 | if (blockSize % su != 0) { 445 | long newBlockSize = blockSize - (blockSize % su) + su; 446 | LOG.debug("fix alignment: blksize " + blockSize + " new blksize " + newBlockSize); 447 | blockSize = newBlockSize; 448 | } 449 | 450 | /* 451 | * The default Ceph data pool is selected to store files unless a specific 452 | * data pool is provided when a file is created. Since a pool has a fixed 453 | * replication factor, in order to achieve a requested replication factor, 454 | * we must select an appropriate data pool to place the file into. 455 | */ 456 | String datapool = selectDataPool(path, replication); 457 | int fd = ceph.open(path, flags, (int)permission.toShort(), (int)blockSize, 458 | CEPH_STRIPE_COUNT, (int)blockSize, datapool); 459 | 460 | if (progress != null) { 461 | progress.progress(); 462 | } 463 | 464 | OutputStream ostream = new CephOutputStream(getConf(), ceph, fd, 465 | bufferSize); 466 | return new FSDataOutputStream(ostream, statistics); 467 | } 468 | 469 | /** 470 | * Opens an FSDataOutputStream at the indicated Path with write-progress 471 | * reporting. Same as create(), except fails if parent directory doesn't 472 | * already exist. 473 | * @param path the file name to open 474 | * @param permission 475 | * @param overwrite if a file with this name already exists, then if true, 476 | * the file will be overwritten, and if false an error will be thrown. 477 | * @param bufferSize the size of the buffer to be used. 478 | * @param replication required block replication for the file. 
479 | * @param blockSize 480 | * @param progress 481 | * @throws IOException 482 | * @see #setPermission(Path, FsPermission) 483 | * @deprecated API only for 0.20-append 484 | */ 485 | @Deprecated 486 | public FSDataOutputStream createNonRecursive(Path path, FsPermission permission, 487 | boolean overwrite, 488 | int bufferSize, short replication, long blockSize, 489 | Progressable progress) throws IOException { 490 | 491 | path = makeAbsolute(path); 492 | 493 | Path parent = path.getParent(); 494 | 495 | if (parent != null) { 496 | CephStat stat = new CephStat(); 497 | ceph.lstat(parent, stat); // handles FileNotFoundException case 498 | if (stat.isFile()) 499 | throw new FileAlreadyExistsException(parent.toString()); 500 | } 501 | 502 | return this.create(path, permission, overwrite, 503 | bufferSize, replication, blockSize, progress); 504 | } 505 | 506 | /** 507 | * Rename a file or directory. 508 | * @param src The current path of the file/directory 509 | * @param dst The new name for the path. 510 | * @return true if the rename succeeded, false otherwise. 511 | */ 512 | @Override 513 | public boolean rename(Path src, Path dst) throws IOException { 514 | src = makeAbsolute(src); 515 | dst = makeAbsolute(dst); 516 | 517 | try { 518 | CephStat stat = new CephStat(); 519 | ceph.lstat(dst, stat); 520 | if (stat.isDir()) 521 | return rename(src, new Path(dst, src.getName())); 522 | return false; 523 | } catch (FileNotFoundException e) {} 524 | 525 | try { 526 | ceph.rename(src, dst); 527 | } catch (FileNotFoundException e) { 528 | throw e; 529 | } catch (Exception e) { 530 | return false; 531 | } 532 | 533 | return true; 534 | } 535 | 536 | /** 537 | * Get a BlockLocation object for each block in a file. 538 | * 539 | * @param file A FileStatus object corresponding to the file you want locations for. 540 | * @param start The offset of the first part of the file you are interested in. 541 | * @param len The amount of the file past the offset you are interested in. 
542 | * @return A BlockLocation[] where each object corresponds to a block within 543 | * the given range. 544 | */ 545 | @Override 546 | public BlockLocation[] getFileBlockLocations(FileStatus file, long start, long len) throws IOException { 547 | Path abs_path = makeAbsolute(file.getPath()); 548 | 549 | int fh = ceph.open(abs_path, CephMount.O_RDONLY, 0); 550 | if (fh < 0) { 551 | LOG.error("getFileBlockLocations:got error " + fh + ", exiting and returning null!"); 552 | return null; 553 | } 554 | 555 | ArrayList blocks = new ArrayList(); 556 | 557 | long curPos = start; 558 | long endOff = curPos + len; 559 | do { 560 | CephFileExtent extent = ceph.get_file_extent(fh, curPos); 561 | 562 | int[] osds = extent.getOSDs(); 563 | String[] names = new String[osds.length]; 564 | String[] hosts = new String[osds.length]; 565 | String[] racks = new String[osds.length]; 566 | 567 | for (int i = 0; i < osds.length; i++) { 568 | InetAddress addr = ceph.get_osd_address(osds[i]); 569 | names[i] = addr.getHostAddress(); 570 | 571 | /* 572 | * Grab the hostname and rack from the crush hierarchy. Current we 573 | * hard code the item types. For a more general treatment, we'll need 574 | * a new configuration option that allows users to map their custom 575 | * crush types to hosts and topology. 
576 | */ 577 | Bucket[] path = ceph.get_osd_crush_location(osds[i]); 578 | for (Bucket bucket : path) { 579 | String type = bucket.getType(); 580 | if (type.compareTo("host") == 0) 581 | hosts[i] = bucket.getName(); 582 | else if (type.compareTo("rack") == 0) 583 | racks[i] = bucket.getName(); 584 | } 585 | } 586 | 587 | blocks.add(new BlockLocation(names, hosts, racks, 588 | extent.getOffset(), extent.getLength())); 589 | 590 | curPos += extent.getLength(); 591 | } while(curPos < endOff); 592 | 593 | ceph.close(fh); 594 | 595 | BlockLocation[] locations = new BlockLocation[blocks.size()]; 596 | locations = blocks.toArray(locations); 597 | 598 | return locations; 599 | } 600 | 601 | @Deprecated 602 | public boolean delete(Path path) throws IOException { 603 | return delete(path, false); 604 | } 605 | 606 | public boolean delete(Path path, boolean recursive) throws IOException { 607 | path = makeAbsolute(path); 608 | 609 | /* path exists? */ 610 | FileStatus status; 611 | try { 612 | status = getFileStatus(path); 613 | } catch (FileNotFoundException e) { 614 | return false; 615 | } 616 | 617 | /* we're done if its a file */ 618 | if (status.isFile()) { 619 | ceph.unlink(path); 620 | return true; 621 | } 622 | 623 | /* get directory contents */ 624 | FileStatus[] dirlist = listStatus(path); 625 | if (dirlist == null) 626 | return false; 627 | 628 | if (!recursive && dirlist.length > 0) 629 | throw new IOException("Directory " + path.toString() + "is not empty."); 630 | 631 | for (FileStatus fs : dirlist) { 632 | if (!delete(fs.getPath(), recursive)) 633 | return false; 634 | } 635 | 636 | ceph.rmdir(path); 637 | return true; 638 | } 639 | 640 | @Override 641 | public short getDefaultReplication() { 642 | return ceph.getDefaultReplication(); 643 | } 644 | 645 | @Override 646 | public long getDefaultBlockSize() { 647 | return getConf().getLong( 648 | CephConfigKeys.CEPH_OBJECT_SIZE_KEY, 649 | CephConfigKeys.CEPH_OBJECT_SIZE_DEFAULT); 650 | } 651 | 652 | @Override 653 | 
public FsStatus getStatus(Path p) throws IOException {
  // File system usage for the mount, derived from statfs on the given path:
  // capacity = bsize * blocks, used = bsize * (blocks - bavail),
  // remaining = bsize * bavail.
  CephStatVFS stat = new CephStatVFS();
  ceph.statfs(p, stat);

  FsStatus status = new FsStatus(stat.bsize * stat.blocks,
      stat.bsize * (stat.blocks - stat.bavail),
      stat.bsize * stat.bavail);
  return status;
}

@Override
protected int getDefaultPort() {
  // Default port for ceph:// URIs; read from configuration with a
  // project-defined fallback.
  return getConf().getInt(
      CephConfigKeys.CEPH_PORT,
      CephConfigKeys.CEPH_PORT_DEFAULT);
}

}
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
                  GNU LESSER GENERAL PUBLIC LICENSE
                       Version 2.1, February 1999

 Copyright (C) 1991, 1999 Free Software Foundation, Inc.
 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

(This is the first released version of the Lesser GPL.  It also counts
 as the successor of the GNU Library Public License, version 2, hence
 the version number 2.1.)

                            Preamble

  The licenses for most software are designed to take away your
freedom to share and change it.  By contrast, the GNU General Public
Licenses are intended to guarantee your freedom to share and change
free software--to make sure the software is free for all its users.

  This license, the Lesser General Public License, applies to some
specially designated software packages--typically libraries--of the
Free Software Foundation and other authors who decide to use it.  You
can use it too, but we suggest you first think carefully about whether
this license or the ordinary General Public License is the better
strategy to use in any particular case, based on the explanations below.
26 | 27 | When we speak of free software, we are referring to freedom of use, 28 | not price. Our General Public Licenses are designed to make sure that 29 | you have the freedom to distribute copies of free software (and charge 30 | for this service if you wish); that you receive source code or can get 31 | it if you want it; that you can change the software and use pieces of 32 | it in new free programs; and that you are informed that you can do 33 | these things. 34 | 35 | To protect your rights, we need to make restrictions that forbid 36 | distributors to deny you these rights or to ask you to surrender these 37 | rights. These restrictions translate to certain responsibilities for 38 | you if you distribute copies of the library or if you modify it. 39 | 40 | For example, if you distribute copies of the library, whether gratis 41 | or for a fee, you must give the recipients all the rights that we gave 42 | you. You must make sure that they, too, receive or can get the source 43 | code. If you link other code with the library, you must provide 44 | complete object files to the recipients, so that they can relink them 45 | with the library after making changes to the library and recompiling 46 | it. And you must show them these terms so they know their rights. 47 | 48 | We protect your rights with a two-step method: (1) we copyright the 49 | library, and (2) we offer you this license, which gives you legal 50 | permission to copy, distribute and/or modify the library. 51 | 52 | To protect each distributor, we want to make it very clear that 53 | there is no warranty for the free library. Also, if the library is 54 | modified by someone else and passed on, the recipients should know 55 | that what they have is not the original version, so that the original 56 | author's reputation will not be affected by problems that might be 57 | introduced by others. 58 | 59 | Finally, software patents pose a constant threat to the existence of 60 | any free program. 
We wish to make sure that a company cannot 61 | effectively restrict the users of a free program by obtaining a 62 | restrictive license from a patent holder. Therefore, we insist that 63 | any patent license obtained for a version of the library must be 64 | consistent with the full freedom of use specified in this license. 65 | 66 | Most GNU software, including some libraries, is covered by the 67 | ordinary GNU General Public License. This license, the GNU Lesser 68 | General Public License, applies to certain designated libraries, and 69 | is quite different from the ordinary General Public License. We use 70 | this license for certain libraries in order to permit linking those 71 | libraries into non-free programs. 72 | 73 | When a program is linked with a library, whether statically or using 74 | a shared library, the combination of the two is legally speaking a 75 | combined work, a derivative of the original library. The ordinary 76 | General Public License therefore permits such linking only if the 77 | entire combination fits its criteria of freedom. The Lesser General 78 | Public License permits more lax criteria for linking other code with 79 | the library. 80 | 81 | We call this license the "Lesser" General Public License because it 82 | does Less to protect the user's freedom than the ordinary General 83 | Public License. It also provides other free software developers Less 84 | of an advantage over competing non-free programs. These disadvantages 85 | are the reason we use the ordinary General Public License for many 86 | libraries. However, the Lesser license provides advantages in certain 87 | special circumstances. 88 | 89 | For example, on rare occasions, there may be a special need to 90 | encourage the widest possible use of a certain library, so that it becomes 91 | a de-facto standard. To achieve this, non-free programs must be 92 | allowed to use the library. 
A more frequent case is that a free 93 | library does the same job as widely used non-free libraries. In this 94 | case, there is little to gain by limiting the free library to free 95 | software only, so we use the Lesser General Public License. 96 | 97 | In other cases, permission to use a particular library in non-free 98 | programs enables a greater number of people to use a large body of 99 | free software. For example, permission to use the GNU C Library in 100 | non-free programs enables many more people to use the whole GNU 101 | operating system, as well as its variant, the GNU/Linux operating 102 | system. 103 | 104 | Although the Lesser General Public License is Less protective of the 105 | users' freedom, it does ensure that the user of a program that is 106 | linked with the Library has the freedom and the wherewithal to run 107 | that program using a modified version of the Library. 108 | 109 | The precise terms and conditions for copying, distribution and 110 | modification follow. Pay close attention to the difference between a 111 | "work based on the library" and a "work that uses the library". The 112 | former contains code derived from the library, whereas the latter must 113 | be combined with the library in order to run. 114 | 115 | GNU LESSER GENERAL PUBLIC LICENSE 116 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 117 | 118 | 0. This License Agreement applies to any software library or other 119 | program which contains a notice placed by the copyright holder or 120 | other authorized party saying it may be distributed under the terms of 121 | this Lesser General Public License (also called "this License"). 122 | Each licensee is addressed as "you". 123 | 124 | A "library" means a collection of software functions and/or data 125 | prepared so as to be conveniently linked with application programs 126 | (which use some of those functions and data) to form executables. 
127 | 128 | The "Library", below, refers to any such software library or work 129 | which has been distributed under these terms. A "work based on the 130 | Library" means either the Library or any derivative work under 131 | copyright law: that is to say, a work containing the Library or a 132 | portion of it, either verbatim or with modifications and/or translated 133 | straightforwardly into another language. (Hereinafter, translation is 134 | included without limitation in the term "modification".) 135 | 136 | "Source code" for a work means the preferred form of the work for 137 | making modifications to it. For a library, complete source code means 138 | all the source code for all modules it contains, plus any associated 139 | interface definition files, plus the scripts used to control compilation 140 | and installation of the library. 141 | 142 | Activities other than copying, distribution and modification are not 143 | covered by this License; they are outside its scope. The act of 144 | running a program using the Library is not restricted, and output from 145 | such a program is covered only if its contents constitute a work based 146 | on the Library (independent of the use of the Library in a tool for 147 | writing it). Whether that is true depends on what the Library does 148 | and what the program that uses the Library does. 149 | 150 | 1. You may copy and distribute verbatim copies of the Library's 151 | complete source code as you receive it, in any medium, provided that 152 | you conspicuously and appropriately publish on each copy an 153 | appropriate copyright notice and disclaimer of warranty; keep intact 154 | all the notices that refer to this License and to the absence of any 155 | warranty; and distribute a copy of this License along with the 156 | Library. 157 | 158 | You may charge a fee for the physical act of transferring a copy, 159 | and you may at your option offer warranty protection in exchange for a 160 | fee. 161 | 162 | 2. 
You may modify your copy or copies of the Library or any portion 163 | of it, thus forming a work based on the Library, and copy and 164 | distribute such modifications or work under the terms of Section 1 165 | above, provided that you also meet all of these conditions: 166 | 167 | a) The modified work must itself be a software library. 168 | 169 | b) You must cause the files modified to carry prominent notices 170 | stating that you changed the files and the date of any change. 171 | 172 | c) You must cause the whole of the work to be licensed at no 173 | charge to all third parties under the terms of this License. 174 | 175 | d) If a facility in the modified Library refers to a function or a 176 | table of data to be supplied by an application program that uses 177 | the facility, other than as an argument passed when the facility 178 | is invoked, then you must make a good faith effort to ensure that, 179 | in the event an application does not supply such function or 180 | table, the facility still operates, and performs whatever part of 181 | its purpose remains meaningful. 182 | 183 | (For example, a function in a library to compute square roots has 184 | a purpose that is entirely well-defined independent of the 185 | application. Therefore, Subsection 2d requires that any 186 | application-supplied function or table used by this function must 187 | be optional: if the application does not supply it, the square 188 | root function must still compute square roots.) 189 | 190 | These requirements apply to the modified work as a whole. If 191 | identifiable sections of that work are not derived from the Library, 192 | and can be reasonably considered independent and separate works in 193 | themselves, then this License, and its terms, do not apply to those 194 | sections when you distribute them as separate works. 
But when you 195 | distribute the same sections as part of a whole which is a work based 196 | on the Library, the distribution of the whole must be on the terms of 197 | this License, whose permissions for other licensees extend to the 198 | entire whole, and thus to each and every part regardless of who wrote 199 | it. 200 | 201 | Thus, it is not the intent of this section to claim rights or contest 202 | your rights to work written entirely by you; rather, the intent is to 203 | exercise the right to control the distribution of derivative or 204 | collective works based on the Library. 205 | 206 | In addition, mere aggregation of another work not based on the Library 207 | with the Library (or with a work based on the Library) on a volume of 208 | a storage or distribution medium does not bring the other work under 209 | the scope of this License. 210 | 211 | 3. You may opt to apply the terms of the ordinary GNU General Public 212 | License instead of this License to a given copy of the Library. To do 213 | this, you must alter all the notices that refer to this License, so 214 | that they refer to the ordinary GNU General Public License, version 2, 215 | instead of to this License. (If a newer version than version 2 of the 216 | ordinary GNU General Public License has appeared, then you can specify 217 | that version instead if you wish.) Do not make any other change in 218 | these notices. 219 | 220 | Once this change is made in a given copy, it is irreversible for 221 | that copy, so the ordinary GNU General Public License applies to all 222 | subsequent copies and derivative works made from that copy. 223 | 224 | This option is useful when you wish to copy part of the code of 225 | the Library into a program that is not a library. 226 | 227 | 4. 
You may copy and distribute the Library (or a portion or 228 | derivative of it, under Section 2) in object code or executable form 229 | under the terms of Sections 1 and 2 above provided that you accompany 230 | it with the complete corresponding machine-readable source code, which 231 | must be distributed under the terms of Sections 1 and 2 above on a 232 | medium customarily used for software interchange. 233 | 234 | If distribution of object code is made by offering access to copy 235 | from a designated place, then offering equivalent access to copy the 236 | source code from the same place satisfies the requirement to 237 | distribute the source code, even though third parties are not 238 | compelled to copy the source along with the object code. 239 | 240 | 5. A program that contains no derivative of any portion of the 241 | Library, but is designed to work with the Library by being compiled or 242 | linked with it, is called a "work that uses the Library". Such a 243 | work, in isolation, is not a derivative work of the Library, and 244 | therefore falls outside the scope of this License. 245 | 246 | However, linking a "work that uses the Library" with the Library 247 | creates an executable that is a derivative of the Library (because it 248 | contains portions of the Library), rather than a "work that uses the 249 | library". The executable is therefore covered by this License. 250 | Section 6 states terms for distribution of such executables. 251 | 252 | When a "work that uses the Library" uses material from a header file 253 | that is part of the Library, the object code for the work may be a 254 | derivative work of the Library even though the source code is not. 255 | Whether this is true is especially significant if the work can be 256 | linked without the Library, or if the work is itself a library. The 257 | threshold for this to be true is not precisely defined by law. 
258 | 259 | If such an object file uses only numerical parameters, data 260 | structure layouts and accessors, and small macros and small inline 261 | functions (ten lines or less in length), then the use of the object 262 | file is unrestricted, regardless of whether it is legally a derivative 263 | work. (Executables containing this object code plus portions of the 264 | Library will still fall under Section 6.) 265 | 266 | Otherwise, if the work is a derivative of the Library, you may 267 | distribute the object code for the work under the terms of Section 6. 268 | Any executables containing that work also fall under Section 6, 269 | whether or not they are linked directly with the Library itself. 270 | 271 | 6. As an exception to the Sections above, you may also combine or 272 | link a "work that uses the Library" with the Library to produce a 273 | work containing portions of the Library, and distribute that work 274 | under terms of your choice, provided that the terms permit 275 | modification of the work for the customer's own use and reverse 276 | engineering for debugging such modifications. 277 | 278 | You must give prominent notice with each copy of the work that the 279 | Library is used in it and that the Library and its use are covered by 280 | this License. You must supply a copy of this License. If the work 281 | during execution displays copyright notices, you must include the 282 | copyright notice for the Library among them, as well as a reference 283 | directing the user to the copy of this License. 
Also, you must do one 284 | of these things: 285 | 286 | a) Accompany the work with the complete corresponding 287 | machine-readable source code for the Library including whatever 288 | changes were used in the work (which must be distributed under 289 | Sections 1 and 2 above); and, if the work is an executable linked 290 | with the Library, with the complete machine-readable "work that 291 | uses the Library", as object code and/or source code, so that the 292 | user can modify the Library and then relink to produce a modified 293 | executable containing the modified Library. (It is understood 294 | that the user who changes the contents of definitions files in the 295 | Library will not necessarily be able to recompile the application 296 | to use the modified definitions.) 297 | 298 | b) Use a suitable shared library mechanism for linking with the 299 | Library. A suitable mechanism is one that (1) uses at run time a 300 | copy of the library already present on the user's computer system, 301 | rather than copying library functions into the executable, and (2) 302 | will operate properly with a modified version of the library, if 303 | the user installs one, as long as the modified version is 304 | interface-compatible with the version that the work was made with. 305 | 306 | c) Accompany the work with a written offer, valid for at 307 | least three years, to give the same user the materials 308 | specified in Subsection 6a, above, for a charge no more 309 | than the cost of performing this distribution. 310 | 311 | d) If distribution of the work is made by offering access to copy 312 | from a designated place, offer equivalent access to copy the above 313 | specified materials from the same place. 314 | 315 | e) Verify that the user has already received a copy of these 316 | materials or that you have already sent this user a copy. 
317 | 318 | For an executable, the required form of the "work that uses the 319 | Library" must include any data and utility programs needed for 320 | reproducing the executable from it. However, as a special exception, 321 | the materials to be distributed need not include anything that is 322 | normally distributed (in either source or binary form) with the major 323 | components (compiler, kernel, and so on) of the operating system on 324 | which the executable runs, unless that component itself accompanies 325 | the executable. 326 | 327 | It may happen that this requirement contradicts the license 328 | restrictions of other proprietary libraries that do not normally 329 | accompany the operating system. Such a contradiction means you cannot 330 | use both them and the Library together in an executable that you 331 | distribute. 332 | 333 | 7. You may place library facilities that are a work based on the 334 | Library side-by-side in a single library together with other library 335 | facilities not covered by this License, and distribute such a combined 336 | library, provided that the separate distribution of the work based on 337 | the Library and of the other library facilities is otherwise 338 | permitted, and provided that you do these two things: 339 | 340 | a) Accompany the combined library with a copy of the same work 341 | based on the Library, uncombined with any other library 342 | facilities. This must be distributed under the terms of the 343 | Sections above. 344 | 345 | b) Give prominent notice with the combined library of the fact 346 | that part of it is a work based on the Library, and explaining 347 | where to find the accompanying uncombined form of the same work. 348 | 349 | 8. You may not copy, modify, sublicense, link with, or distribute 350 | the Library except as expressly provided under this License. 
Any 351 | attempt otherwise to copy, modify, sublicense, link with, or 352 | distribute the Library is void, and will automatically terminate your 353 | rights under this License. However, parties who have received copies, 354 | or rights, from you under this License will not have their licenses 355 | terminated so long as such parties remain in full compliance. 356 | 357 | 9. You are not required to accept this License, since you have not 358 | signed it. However, nothing else grants you permission to modify or 359 | distribute the Library or its derivative works. These actions are 360 | prohibited by law if you do not accept this License. Therefore, by 361 | modifying or distributing the Library (or any work based on the 362 | Library), you indicate your acceptance of this License to do so, and 363 | all its terms and conditions for copying, distributing or modifying 364 | the Library or works based on it. 365 | 366 | 10. Each time you redistribute the Library (or any work based on the 367 | Library), the recipient automatically receives a license from the 368 | original licensor to copy, distribute, link with or modify the Library 369 | subject to these terms and conditions. You may not impose any further 370 | restrictions on the recipients' exercise of the rights granted herein. 371 | You are not responsible for enforcing compliance by third parties with 372 | this License. 373 | 374 | 11. If, as a consequence of a court judgment or allegation of patent 375 | infringement or for any other reason (not limited to patent issues), 376 | conditions are imposed on you (whether by court order, agreement or 377 | otherwise) that contradict the conditions of this License, they do not 378 | excuse you from the conditions of this License. If you cannot 379 | distribute so as to satisfy simultaneously your obligations under this 380 | License and any other pertinent obligations, then as a consequence you 381 | may not distribute the Library at all. 
For example, if a patent 382 | license would not permit royalty-free redistribution of the Library by 383 | all those who receive copies directly or indirectly through you, then 384 | the only way you could satisfy both it and this License would be to 385 | refrain entirely from distribution of the Library. 386 | 387 | If any portion of this section is held invalid or unenforceable under any 388 | particular circumstance, the balance of the section is intended to apply, 389 | and the section as a whole is intended to apply in other circumstances. 390 | 391 | It is not the purpose of this section to induce you to infringe any 392 | patents or other property right claims or to contest validity of any 393 | such claims; this section has the sole purpose of protecting the 394 | integrity of the free software distribution system which is 395 | implemented by public license practices. Many people have made 396 | generous contributions to the wide range of software distributed 397 | through that system in reliance on consistent application of that 398 | system; it is up to the author/donor to decide if he or she is willing 399 | to distribute software through any other system and a licensee cannot 400 | impose that choice. 401 | 402 | This section is intended to make thoroughly clear what is believed to 403 | be a consequence of the rest of this License. 404 | 405 | 12. If the distribution and/or use of the Library is restricted in 406 | certain countries either by patents or by copyrighted interfaces, the 407 | original copyright holder who places the Library under this License may add 408 | an explicit geographical distribution limitation excluding those countries, 409 | so that distribution is permitted only in or among countries not thus 410 | excluded. In such case, this License incorporates the limitation as if 411 | written in the body of this License. 412 | 413 | 13. 
The Free Software Foundation may publish revised and/or new 414 | versions of the Lesser General Public License from time to time. 415 | Such new versions will be similar in spirit to the present version, 416 | but may differ in detail to address new problems or concerns. 417 | 418 | Each version is given a distinguishing version number. If the Library 419 | specifies a version number of this License which applies to it and 420 | "any later version", you have the option of following the terms and 421 | conditions either of that version or of any later version published by 422 | the Free Software Foundation. If the Library does not specify a 423 | license version number, you may choose any version ever published by 424 | the Free Software Foundation. 425 | 426 | 14. If you wish to incorporate parts of the Library into other free 427 | programs whose distribution conditions are incompatible with these, 428 | write to the author to ask for permission. For software which is 429 | copyrighted by the Free Software Foundation, write to the Free 430 | Software Foundation; we sometimes make exceptions for this. Our 431 | decision will be guided by the two goals of preserving the free status 432 | of all derivatives of our free software and of promoting the sharing 433 | and reuse of software generally. 434 | 435 | NO WARRANTY 436 | 437 | 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO 438 | WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 439 | EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR 440 | OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY 441 | KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE 442 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 443 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE 444 | LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME 445 | THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 
446 | 447 | 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN 448 | WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY 449 | AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU 450 | FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR 451 | CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE 452 | LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING 453 | RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A 454 | FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF 455 | SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH 456 | DAMAGES. 457 | 458 | END OF TERMS AND CONDITIONS 459 | 460 | How to Apply These Terms to Your New Libraries 461 | 462 | If you develop a new library, and you want it to be of the greatest 463 | possible use to the public, we recommend making it free software that 464 | everyone can redistribute and change. You can do so by permitting 465 | redistribution under these terms (or, alternatively, under the terms of the 466 | ordinary General Public License). 467 | 468 | To apply these terms, attach the following notices to the library. It is 469 | safest to attach them to the start of each source file to most effectively 470 | convey the exclusion of warranty; and each file should have at least the 471 | "copyright" line and a pointer to where the full notice is found. 472 | 473 | {description} 474 | Copyright (C) {year} {fullname} 475 | 476 | This library is free software; you can redistribute it and/or 477 | modify it under the terms of the GNU Lesser General Public 478 | License as published by the Free Software Foundation; either 479 | version 2.1 of the License, or (at your option) any later version. 480 | 481 | This library is distributed in the hope that it will be useful, 482 | but WITHOUT ANY WARRANTY; without even the implied warranty of 483 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU 484 | Lesser General Public License for more details. 485 | 486 | You should have received a copy of the GNU Lesser General Public 487 | License along with this library; if not, write to the Free Software 488 | Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 489 | USA 490 | 491 | Also add information on how to contact you by electronic and paper mail. 492 | 493 | You should also get your employer (if you work as a programmer) or your 494 | school, if any, to sign a "copyright disclaimer" for the library, if 495 | necessary. Here is a sample; alter the names: 496 | 497 | Yoyodyne, Inc., hereby disclaims all copyright interest in the 498 | library `Frob' (a library for tweaking knobs) written by James Random 499 | Hacker. 500 | 501 | {signature of Ty Coon}, 1 April 1990 502 | Ty Coon, President of Vice 503 | 504 | That's all there is to it! --------------------------------------------------------------------------------