cephfs

Installing ceph using kolla-ansible for all-in-one setup

╄→尐↘猪︶ㄣ 提交于 2020-12-07 07:15:09
问题 I am trying to deploy the all-in-one configuration using kolla-ansible with ceph enabled enable_ceph: "yes" #enable_ceph_mds: "no" enable_ceph_rgw: "yes" #enable_ceph_nfs: "no" enable_ceph_dashboard: "{{ enable_ceph | bool }}" #enable_chrony: "yes" enable_cinder: "yes" enable_cinder_backup: "yes" glance_backend_ceph: "yes" gnocchi_backend_storage: "{{ 'ceph' if enable_ceph|bool else 'file' }}" cinder_backend_ceph: "{{ enable_ceph }}" cinder_backup_driver: "ceph" nova_backend_ceph: "{{ enable

cephfs linux kernel client针对linux page cache的操作

依然范特西╮ 提交于 2019-12-10 03:27:21
针对linux page cache的操作主要体现在struct address_space_operations数据结构中,cephfs处理linux page cache的函数集合如下: const struct address_space_operations ceph_aops = { .readpage = ceph_readpage, .readpages = ceph_readpages, .writepage = ceph_writepage, .writepages = ceph_writepages_start, .write_begin = ceph_write_begin, .write_end = ceph_write_end, .set_page_dirty = ceph_set_page_dirty, .invalidatepage = ceph_invalidatepage, .releasepage = ceph_releasepage, .direct_IO = ceph_direct_io, }; ceph_readpage(struct file *filp, struct page *page) |__调用readpage_nounlock(filp, page) 在加锁的情况下读取一个物理内存页的数据 |_

cephfs kernel client针对inode的相关操作

随声附和 提交于 2019-12-09 10:11:14
针对文件的inode的操作体现在数据结构struct inode_operations中,具体内容如下: const struct inode_operations ceph_file_iops = { .permission = ceph_permission, .setattr = ceph_setattr, .getattr = ceph_getattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = ceph_listxattr, .removexattr = generic_removexattr, .get_acl = ceph_get_acl, .set_acl = ceph_set_acl, }; ceph_permission(struct inode *inode, int mask) 检查inode是否有mask指定的访问权限 |__调用ceph_do_getattr()函数从本地缓存或mds集群中得到CEPH_CAP_AUTH_SHARED对应的权限 |__调用generic_permission()函数做常规的权限检查 ceph_setattr(struct dentry *dentry, struct iattr *attr) 设置文件属性时调用该函数 |

cephfs linux kernel client针对export的操作

走远了吗. 提交于 2019-12-09 10:11:01
const struct export_operations ceph_export_ops = { .encode_fh = ceph_encode_fh, .fh_to_dentry = ceph_fh_to_dentry, .fh_to_parent = ceph_fh_to_parent, .get_parent = ceph_get_parent, .get_name = ceph_get_name, }; ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len, struct inode *parent_inode) |__调用ceph_snap(inode)函数检查inode是否包含snap信息,若包含snap则直接返回 |__输入参数校验 |__若parent_inode不为空且max_len小于sizeof(struct ceph_nfs_confh)/4 |__设置max_len=sizeof(struct ceph_nfs_confh)/4 |__直接返回 |__若parent_inode为空且max_len小于sizeof(struct ceph_nfs_fh)/4 |__设置max_len=sizeof(struct ceph_nfs_fh)/4 |__直接返回 |__若parent

cephfs linux kernel client针对fscache的操作

|▌冷眼眸甩不掉的悲伤 提交于 2019-12-09 10:10:45
针对inode在fscache中操作主要集中在数据结构struct fscache_cookie_def中,具体的数据结构及其操作如下: static const struct fscache_cookie_def ceph_fscache_inode_object_def = { .name = "CEPH.inode", .type = FSCACHE_COOKIE_TYPE_DATAFILE, .get_key = ceph_fscache_inode_get_key, .get_attr = ceph_fscache_inode_get_attr, .get_aux = ceph_fscache_inode_get_aux, .check_aux = ceph_fscache_inode_check_aux, .now_uncached = ceph_fscache_inode_now_uncached, }; ceph_fscache_inode_get_key(void *cookie_netfs_data, void *buffer, uint16_t maxbuf) 读取struct ceph_inode_info中的i_vino信息到buffer |__从参数cookie_netfs_data得到struct ceph_inode_info数据结构 |_

cephfs linux kernel client针对superblock操作流程的分析

允我心安 提交于 2019-12-07 14:03:05
init_caches() 初始化如下几个cache: ceph_inode_cachep ceph_cap_cachep ceph_cap_flush_cachep ceph_dentry_cachep ceph_file_cachep cephfs cache ceph_mount() |__解析mount options |__创建fs client,即:struct ceph_fs_client |__创建mds client且设置fs client和mds client之间的对应关系 |__得到superblock且使用ceph_set_super()函数初始化 |__调用ceph_real_mount()函数来执行实际的mount操作 ceph_real_mount() |__若superblock中对应的s_boot为空 |__调用__ceph_open_session()函数创建client到mds的session信息 |__调用open_root_dentry()函数得到cephfs的root dentry信息 |__将root dentry写入到superblock中的s_boot中 |__若mount options中没有server path内容 |__设置当前root为superblock对应的s_root |__调用dget(root

cephfs kernel client针对打开文件的操作

浪子不回头ぞ 提交于 2019-12-07 14:02:52
针对打开文件的操作主要体现在struct file_operations数据结构中。在cephfs kernel client中具体实现如下: const struct file_operations ceph_file_fops = { .open = ceph_open, .release = ceph_release, .llseek = ceph_llseek, .read_iter = ceph_read_iter, .write_iter = ceph_write_iter, .mmap = ceph_mmap, .fsync = ceph_fsync, .lock = ceph_lock, .flock = ceph_flock, .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, .unlocked_ioctl = ceph_ioctl, .compat_ioctl = ceph_ioctl, .fallocate = ceph_fallocate, }; ceph_open(struct inode *inode, struct file *file) 该函数在打开文件时被调用 |__调用prepare_open_request()函数来创建ceph

cephfs kernel client针对dentry的操作

北城以北 提交于 2019-12-07 14:02:38
cephfs kernel client针对dentry的操作 const struct dentry_operations ceph_dentry_ops = { .d_revalidate = ceph_d_revalidate, .d_release = ceph_d_release, .d_prune = ceph_d_prune, }; ceph_d_revalidate(struct dentry *dentry, unsigned int flags) 用于检查cache中的dentry是否有效 |__调用dget_parent()函数得到dentry对应的parent dentry |__调用d_inode()函数得到parent dentry对应的struct inode数据 |__调用dentry_lease_is_valid()函数检查dentry lease是否有效 |__若无效 |__得到操作码op=CEPH_MDS_OP_LOOKUPSNAP或者CEPH_MDS_OP_LOOKUP |__调用ceph_mdsc_create_request()函数创建mds请求 |__调用ceph_mdsc_do_request()函数将请求同步发送给mds进程 |__调用ceph_dentry_lru_touch()函数将dentry添加到lru中 ceph_d

cephfs kernel client针对dir的inode操作

删除回忆录丶 提交于 2019-12-07 14:02:24
cephfs kernel client针对dir的inode操作 const struct inode_operations ceph_dir_iops = { .lookup = ceph_lookup, .permission = ceph_permission, .getattr = ceph_getattr, .setattr = ceph_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = ceph_listxattr, .removexattr = generic_removexattr, .get_acl = ceph_get_acl, .set_acl = ceph_set_acl, .mknod = ceph_mknod, .symlink = ceph_symlink, .mkdir = ceph_mkdir, .link = ceph_link, .unlink = ceph_unlink, .rmdir = ceph_unlink, .rename = ceph_rename, .create = ceph_create, .atomic_open = ceph_atomic_open, }; ceph_lookup(struct inode

cephfs kernel client针对dir的file_operations操作

╄→尐↘猪︶ㄣ 提交于 2019-12-06 23:13:23
cephfs kernel client针对dir的file_operations操作 const struct file_operations ceph_dir_fops = { .read = ceph_read_dir, .iterate = ceph_readdir, .llseek = ceph_dir_llseek, .open = ceph_open, .release = ceph_release, .unlocked_ioctl = ceph_ioctl, .fsync = ceph_fsync, }; ceph_read_dir(struct file *file, char __user *buf, size_t size, loff_t *ppos) 只有在mount时带有参数-o dirstat时该函数才有效 |__调用ceph_test_mount_opt()函数检查mount options中是否包含DIRSTAT,若不包含则直接返回 |__若struct ceph_file_info中的dir_info为空 |__调用kmalloc()函数为dir_info分配空间 |__使用snprintf()函数向dir_info的内存空间进行格式化输出 |__调用copy_to_user()函数将dir_info中的内容复制到用户态空间buf中 ceph