
Initial implementation of the logic for migrating HDFS files to MinIO

lijie 3 years ago
parent
commit
6d9a1b3e01

+ 255 - 0
dmp-cloud/dmp-file/src/main/java/com/persagy/dmp/file/controller/FileMigrateController.java

@@ -0,0 +1,255 @@
+package com.persagy.dmp.file.controller;
+
+import cn.hutool.core.map.MapUtil;
+import cn.hutool.core.thread.ThreadUtil;
+import cn.hutool.core.util.StrUtil;
+import com.baomidou.mybatisplus.core.toolkit.IdWorker;
+import com.persagy.dmp.common.constant.ResponseCode;
+import com.persagy.dmp.common.exception.BusinessException;
+import com.persagy.dmp.common.lang.PsDateTime;
+import com.persagy.dmp.common.model.response.CommonResult;
+import com.persagy.dmp.common.utils.ResultHelper;
+import com.persagy.dmp.file.model.FileInfo;
+import com.persagy.dmp.file.model.FileInfoCreator;
+import com.persagy.dmp.file.service.IFileService;
+import io.minio.*;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.*;
+import org.springframework.core.io.Resource;
+import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
+import org.springframework.core.io.support.ResourcePatternResolver;
+import org.springframework.web.bind.annotation.PostMapping;
+import org.springframework.web.bind.annotation.RequestBody;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.RestController;
+
+import java.io.IOException;
+import java.util.*;
+
+/**
+ * File migration controller
+ * @author Charlie Yu
+ * @date 2021-06-23
+ */
+@Slf4j
+@RestController
+@RequestMapping("/migrate/file")
+@RequiredArgsConstructor
+public class FileMigrateController {
+
+    /** Location under the config directory next to the jar */
+    private static final String FILE_CONFIG_PATH="file:./config/";
+    /** Classpath location */
+    private static final String FILE_CLASSPATH_PATH="classpath:./";
+    /** hdfs-site.xml file name */
+    private static final String HDFS_SITE_FILE_NAME ="hdfs-site.xml";
+    /** core-site.xml file name */
+    private static final String HDFS_CORE_FILE_NAME ="core-site.xml";
+    /** HDFS access address key */
+    private static final String HDFS_URL_KEY="fs.defaultFS";
+    /** Default base path for stored file keys */
+    private static final String BASE_FILE_PATH="/test/files";
+    /** HDFS access user name property */
+    private static final String HADOOP_USER_NAME="HADOOP_USER_NAME";
+
+    private final IFileService fileService;
+
+    /**
+     * Start the file migration.
+     * Scope: migrate files from HDFS to MinIO.
+     * @param requestMap sample body:
+     * {
+     *     "hdfsUrl":"http://node1:8020",
+     *     "fileBasePath":"/test/files",
+     *     "hdfsUser":"saga",
+     *     "minioUrl":"http://192.168.100.210:9000",
+     *     "minioUser":"root",
+     *     "minioSecret":"persagy@2020"
+     * }
+     */
+    @PostMapping("/start")
+    public CommonResult<String> start(@RequestBody Map<String, Object> requestMap) {
+        // Parse and validate parameters
+        final String hdfsUrl = MapUtil.getStr(requestMap, "hdfsUrl");
+        final String fileBasePath = MapUtil.getStr(requestMap, "fileBasePath");
+        final String hdfsUser = MapUtil.getStr(requestMap, "hdfsUser");
+        final String minioUrl = MapUtil.getStr(requestMap, "minioUrl");
+        final String minioUser = MapUtil.getStr(requestMap, "minioUser");
+        final String minioSecret = MapUtil.getStr(requestMap, "minioSecret");
+        final String groupCode = MapUtil.getStr(requestMap, "groupCode","persagy");
+        if(StrUtil.isBlank(hdfsUrl)
+                || StrUtil.isBlank(fileBasePath)
+                || StrUtil.isBlank(hdfsUser)
+                || StrUtil.isBlank(minioUrl)
+                || StrUtil.isBlank(minioUser)
+                || StrUtil.isBlank(minioSecret)) {
+            throw new BusinessException(ResponseCode.A0400.getCode(), ResponseCode.A0400.getDesc());
+        }
+        // Start the migration; it can run for a long time, so execute it in a background thread
+        // TODO Add a distributed lock and support querying migration progress
+        ThreadUtil.execute(() -> startMigrate(hdfsUrl,fileBasePath,hdfsUser,minioUrl,minioUser,minioSecret,groupCode));
+        return ResultHelper.success();
+    }
+    /***
+     * Description: Start the data migration.
+     * @param hdfsUrl : HDFS access URL
+     * @param fileBasePath : path of the files to migrate
+     * @param hdfsUser : user with access to the HDFS file server
+     * @param minioUrl : MinIO address
+     * @param accessKey : MinIO user
+     * @param secretKey : MinIO secret
+     * @param groupCode : group code (used as the bucket name)
+     * @author : lijie
+     * @date :2021/9/9 23:31
+     * Update By lijie 2021/9/9 23:31
+     */
+    private void startMigrate(String hdfsUrl, String fileBasePath, String hdfsUser, String minioUrl, String accessKey,
+                              String secretKey, String groupCode){
+        // 1. Create the MinIO client
+        MinioClient minioClient = MinioClient.builder()
+                .endpoint(minioUrl)
+                .region("GMT+8")
+                .credentials(accessKey, secretKey)
+                .build();
+        // 2. Create the HDFS file system handle
+        FileSystem fileSystem = null;
+        try {
+            if (!minioClient.bucketExists(BucketExistsArgs.builder().bucket(groupCode).build())){
+                minioClient.makeBucket(MakeBucketArgs.builder().bucket(groupCode).region("GMT+8").build());
+            }
+            fileSystem = FileSystem.get(createConfig(hdfsUrl,hdfsUser));
+            RemoteIterator<LocatedFileStatus> listFiles = fileSystem.listFiles(new Path(BASE_FILE_PATH + "/" + fileBasePath), true);
+            while (listFiles.hasNext()){
+                LocatedFileStatus fileStatus = listFiles.next();
+                if (fileStatus.isDirectory()){
+                    continue;
+                }
+                Path path = fileStatus.getPath();
+                FileInfo fileInfo = FileInfoCreator.of(groupCode, IdWorker.getIdStr(), groupCode, path.getName());
+                fileInfo.setCreationTime(new PsDateTime());
+                fileService.insertFile(fileInfo);
+                // Stream the file from HDFS to MinIO; try-with-resources
+                // closes the HDFS input stream after each upload
+                try (FSDataInputStream dataInputStream = fileSystem.open(path)) {
+                    minioClient.putObject(PutObjectArgs.builder()
+                            .bucket(groupCode)
+                            .object(path.getName())
+                            .stream(dataInputStream, -1, 10485760).build());
+                }
+                log.info("Migrated file: {}", path.getName());
+            }
+            // Delete the HDFS directory once all files have been migrated
+            fileSystem.delete(new Path(BASE_FILE_PATH + "/" + fileBasePath), true);
+        }catch (Exception e){
+            log.error("转移文件失败",e);
+        }finally {
+            if (null!=fileSystem){
+                try {
+                    fileSystem.close();
+                } catch (IOException e) {
+                    log.error("关闭流失败",e);
+                }
+            }
+        }
+    }
+    /***
+     * Description: Create the HDFS configuration.
+     * @param url : HDFS server address
+     * @param accessKey : HDFS access user, set as the HADOOP_USER_NAME system property
+     * @return : org.apache.hadoop.conf.Configuration
+     * @author : lijie
+     * @date :2021/9/9 23:37
+     * Update By lijie 2021/9/9 23:37
+     */
+    public Configuration createConfig(String url,String accessKey) {
+        Configuration plainConfig = new Configuration();
+        try {
+            System.setProperty(HADOOP_USER_NAME,accessKey);
+            String hdfsSiteConfigFilePath = FILE_CONFIG_PATH+ HDFS_SITE_FILE_NAME;
+            String hdfsSiteClasspathFilePath = FILE_CLASSPATH_PATH+ HDFS_SITE_FILE_NAME;
+            String hdfsCoreConfigFilePath = FILE_CONFIG_PATH+ HDFS_CORE_FILE_NAME;
+            String hdfsCoreClasspathFilePath = FILE_CLASSPATH_PATH+ HDFS_CORE_FILE_NAME;
+            log.info("hdfs-site.xml existence - config dir:{}, classpath:{}",checkResourcesExist(hdfsSiteConfigFilePath),
+                    checkResourcesExist(hdfsSiteClasspathFilePath));
+            log.info("core-site.xml existence - config dir:{}, classpath:{}",checkResourcesExist(hdfsCoreConfigFilePath),
+                    checkResourcesExist(hdfsCoreClasspathFilePath));
+            // Load the hdfs-site.xml file
+            Resource[] siteResources = getResource(hdfsSiteConfigFilePath, hdfsSiteClasspathFilePath);
+            if (siteResources.length<1){
+                throw new BusinessException(ResponseCode.B0300.getCode(),"hdfs-site.xml does not exist");
+            }
+            // Load the core-site.xml file
+            Resource[] coreResources = getResource(hdfsCoreConfigFilePath, hdfsCoreClasspathFilePath);
+            if (coreResources.length<1){
+                throw new BusinessException(ResponseCode.B0300.getCode(),"core-site.xml does not exist");
+            }
+            plainConfig.set(HDFS_URL_KEY,url);
+            plainConfig.addResource(siteResources[0].getInputStream());
+            plainConfig.addResource(coreResources[0].getInputStream());
+        }catch (Exception e) {
+            log.error("Failed to load HDFS configuration files!",e);
+        }
+        return plainConfig;
+    }
+
+    /***
+     * @Description: Check whether a resource file exists.
+     * @param configFilePath : configuration file in the directory next to the jar
+     * @return : boolean
+     * @author: lijie
+     * @Date:2020/6/6 19:43
+     * Update By lijie 2020/6/6 19:43
+     */
+    private static boolean checkResourcesExist(String configFilePath) throws IOException {
+        ResourcePatternResolver resolver = new PathMatchingResourcePatternResolver();
+        Resource[] resolverResources = resolver.getResources(configFilePath);
+        return resolverResources.length > 0 && resolverResources[0].exists();
+    }
+
+    /***
+     * Description: Resolve resource files, preferring the external config path over the classpath.
+     * @param configFilePath : configuration file path
+     * @param classpathFilePath : file path under the classpath
+     * @return : org.springframework.core.io.Resource[]
+     * @author : lijie
+     * @date :2021/3/1 14:25
+     * Update By lijie 2021/3/1 14:25
+     */
+    public static Resource[] getResource(String configFilePath, String classpathFilePath)
+            throws IOException {
+        ResourcePatternResolver resolver = new PathMatchingResourcePatternResolver();
+        Resource[] resolverResources = resolver.getResources(configFilePath);
+        if (resolverResources.length > 0) {
+            boolean exist = true;
+            for (Resource resource : resolverResources) {
+                if (!resource.exists()) {
+                    exist = false;
+                    break;
+                }
+            }
+            if(exist) {
+                return resolverResources;
+            }
+        }
+        return resolver.getResources(classpathFilePath);
+    }
+}
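
For reference, a minimal sketch of how a client might kick off the migration through this endpoint. The host and port (http://localhost:8080) are assumptions, not part of this commit; the request body mirrors the sample in the Javadoc above, and Hutool's HttpUtil (already on the classpath in this project) is used for the call.

// Hypothetical client-side call; the service host/port are assumed.
import cn.hutool.http.HttpUtil;
import com.alibaba.fastjson.JSONObject;

public class MigrateStartExample {
    public static void main(String[] args) {
        JSONObject body = new JSONObject();
        body.put("hdfsUrl", "http://node1:8020");
        body.put("fileBasePath", "/test/files");
        body.put("hdfsUser", "saga");
        body.put("minioUrl", "http://192.168.100.210:9000");
        body.put("minioUser", "root");
        body.put("minioSecret", "persagy@2020");
        // The endpoint returns immediately; the migration itself
        // runs in a background thread on the server.
        String result = HttpUtil.post("http://localhost:8080/migrate/file/start", body.toJSONString());
        System.out.println(result);
    }
}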

+ 1 - 0
dmp-comp/dmp-file-starter/README.md

@@ -2,6 +2,7 @@
 ============ 
 File service client
 - Supports MinIO file servers
+- Supports HDFS file servers
 - Supports file management
 - Automatic image compression (to under 200 KB)
 The file management service stores only file metadata; clients upload files directly to the file server, with no extra relay through the file management service.

+ 7 - 4
dmp-comp/dmp-file-starter/src/main/java/com/persagy/dmp/storage/config/HdfsConfig.java

@@ -40,7 +40,7 @@ public class HdfsConfig {
     /** HDFS access address key */
     private static final String HDFS_URL_KEY="fs.defaultFS";
     /** HDFS access user name property */
-    //private static final String HADOOP_USER_NAME="HADOOP_USER_NAME";
+    private static final String HADOOP_USER_NAME="HADOOP_USER_NAME";
 
     /** File server url */
     private String url;
@@ -54,12 +54,15 @@ public class HdfsConfig {
     public org.apache.hadoop.conf.Configuration plainConfig() {
         org.apache.hadoop.conf.Configuration plainConfig = new org.apache.hadoop.conf.Configuration();
         try {
+            System.setProperty(HADOOP_USER_NAME,accessKey);
             String hbaseSiteConfigFilePath = FILE_CONFIG_PATH+ HDFS_SITE_FILE_NAME;
             String hbaseSiteClasspathFilePath = FILE_CLASSPATH_PATH+ HDFS_SITE_FILE_NAME;
+            String hbaseCoreConfigFilePath = FILE_CONFIG_PATH+ HDFS_CORE_FILE_NAME;
+            String hbaseCoreClasspathFilePath = FILE_CLASSPATH_PATH+ HDFS_CORE_FILE_NAME;
             log.info("hdfs-site.xml文件存在情况,config目录:{},classPath:{}",checkResoucesExist(hbaseSiteConfigFilePath),
                     checkResoucesExist(hbaseSiteClasspathFilePath));
-            log.info("core-site.xml文件存在情况,config目录:{},classPath:{}",checkResoucesExist(hbaseSiteConfigFilePath),
-                    checkResoucesExist(hbaseSiteClasspathFilePath));
+            log.info("core-site.xml文件存在情况,config目录:{},classPath:{}",checkResoucesExist(hbaseCoreConfigFilePath),
+                    checkResoucesExist(hbaseCoreClasspathFilePath));
             log.info("classpath下的文件是否存在:"+checkResoucesExist(hbaseSiteClasspathFilePath));
             // Load the hdfs-site.xml file
             Resource[] siteResources = getResource(hbaseSiteConfigFilePath, hbaseSiteClasspathFilePath);
@@ -67,7 +70,7 @@ public class HdfsConfig {
                 throw new BusinessException(ResponseCode.B0300.getCode(),"hdfs-site.xml does not exist");
             }
             // Load the core-site.xml file
-            Resource[] coreResources = getResource(hbaseSiteConfigFilePath, hbaseSiteClasspathFilePath);
+            Resource[] coreResources = getResource(hbaseCoreConfigFilePath, hbaseCoreClasspathFilePath);
             if (coreResources.length<1){
                 throw new BusinessException(ResponseCode.B0300.getCode(),"core-site.xml does not exist");
             }
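
The fixes in this hunk matter because, under Hadoop's simple authentication, the client reads HADOOP_USER_NAME to decide which user it acts as, and the property must be set before the first FileSystem.get(...) call. A minimal standalone sketch of the mechanism follows; the HDFS address hdfs://node1:8020 and user saga are illustrative, not part of this commit.

// Minimal sketch; the HDFS address and user below are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HadoopUserNameExample {
    public static void main(String[] args) throws Exception {
        // With simple auth the Hadoop client reads this system property
        // (or the HADOOP_USER_NAME environment variable) to pick the
        // acting user, so it must be set before FileSystem.get(...).
        System.setProperty("HADOOP_USER_NAME", "saga");
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://node1:8020");
        try (FileSystem fs = FileSystem.get(conf)) {
            System.out.println(fs.exists(new Path("/test/files")));
        }
    }
}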