第100+17步 ChatGPT学习:R实现Catboost分类

基于R 4.2.2版本演示。一、写在前面:有不少大佬问做机器学习分类能不能用R语言,不想学Python咯。答曰:可!用GPT或者Kimi转一下就得了呗。加上最近也没啥内容写了,就帮各位搬运一下吧。

基于R 4.2.2版本演示

一、写在前面

有不少大佬问做机器学习分类能不能用R语言,不想学Python咯。

答曰:可!用GPT或者Kimi转一下就得了呗。

加上最近也没啥内容写了,就帮各位搬运一下吧。

二、R代码实现Catboost分类

(1)导入数据

我习惯用RStudio自带的导入功能:

(2)建立Catboost模型(默认参数)

# Load necessary libraries
library(caret)     # createDataPartition() for a stratified train/validation split
library(pROC)      # roc() / auc() for ROC analysis
library(ggplot2)   # plotting
library(catboost)  # CatBoost gradient-boosting model

# Assume 'data' is your dataframe containing the data, with the binary
# outcome in column X and the features in the remaining columns.
# Set seed to ensure reproducibility
set.seed(123)

# Split data into training and validation sets (80% training, 20% validation)
trainIndex <- createDataPartition(data$X, p = 0.8, list = FALSE)
trainData <- data[trainIndex, ]
validData <- data[-trainIndex, ]

# Prepare pools for CatBoost: features are every column except X, label is X
trainPool <- catboost.load_pool(data = trainData[, -which(names(trainData) == "X")], label = trainData$X)
validPool <- catboost.load_pool(data = validData[, -which(names(validData) == "X")], label = validData$X)

# Define parameters for CatBoost
params <- list(
  iterations = 250,           # number of boosting rounds (trees)
  depth = 6,                  # depth of each tree
  learning_rate = 0.1,        # shrinkage applied to each tree's contribution
  l2_leaf_reg = 10,           # L2 regularization on leaf values
  loss_function = "Logloss",  # binary-classification objective
  eval_metric = "AUC"         # metric reported during training
)

# Train the CatBoost model on the training pool only (no eval set passed here)
model <- catboost.train(learn_pool = trainPool, params = params)

# Predict on the training and validation sets using the correct parameter.
# prediction_type = "Probability" returns P(class == 1) for the Logloss objective.
trainPredict <- catboost.predict(model, trainPool, prediction_type = "Probability")
validPredict <- catboost.predict(model, validPool, prediction_type = "Probability")

# Convert predictions to binary using 0.5 as threshold
trainPredictBinary <- ifelse(trainPredict > 0.5, 1, 0)
validPredictBinary <- ifelse(validPredict > 0.5, 1, 0)

# Compute ROC objects. as.numeric(X) - 1 recodes the outcome to 0/1
# (NOTE(review): assumes X is a two-level factor here — verify upstream).
trainRoc <- roc(response = as.numeric(trainData$X) - 1, predictor = trainPredict)
validRoc <- roc(response = as.numeric(validData$X) - 1, predictor = validPredict)

# Plot ROC curves with ggplot2 (x = false-positive rate, y = true-positive rate)
trainRocPlot <- ggplot(data = data.frame(fpr = 1 - trainRoc$specificities, tpr = trainRoc$sensitivities), aes(x = fpr, y = tpr)) +
  geom_line(color = "blue") +
  # BUG FIX: the original wrapped geom_area() in aes(ifelse(fpr <= 1, fpr, NA)),
  # which re-mapped the x aesthetic (fpr is always <= 1, so the ifelse was a
  # no-op) and never shaded the region under the curve. Inheriting the
  # plot-level aes(x, y) shades the area under the ROC curve correctly.
  geom_area(fill = "blue", alpha = 0.2) +
  geom_abline(slope = 1, intercept = 0, linetype = "dashed", color = "black") +  # chance diagonal
  ggtitle("Training ROC Curve") +
  xlab("False Positive Rate") +
  ylab("True Positive Rate") +
  annotate("text", x = 0.5, y = 0.1, label = paste("Training AUC =", round(auc(trainRoc), 2)), hjust = 0.5, color = "blue")

validRocPlot <- ggplot(data = data.frame(fpr = 1 - validRoc$specificities, tpr = validRoc$sensitivities), aes(x = fpr, y = tpr)) +
  geom_line(color = "red") +
  # Same fix as above: shade under the curve via the inherited aes.
  geom_area(fill = "red", alpha = 0.2) +
  geom_abline(slope = 1, intercept = 0, linetype = "dashed", color = "black") +
  ggtitle("Validation ROC Curve") +
  xlab("False Positive Rate") +
  ylab("True Positive Rate") +
  annotate("text", x = 0.5, y = 0.2, label = paste("Validation AUC =", round(auc(validRoc), 2)), hjust = 0.5, color = "red")

# Display the plots
print(trainRocPlot)
print(validRocPlot)


# Calculate confusion matrices based on 0.5 cutoff for probability.
# Rows = actual class, columns = thresholded prediction (FALSE/TRUE).
confMatTrain <- table(trainData$X, trainPredict >= 0.5)
confMatValid <- table(validData$X, validPredict >= 0.5)

# Plot and display confusion matrices as ggplot2 heat maps.
# pred: predicted probabilities (thresholded here at 0.5)
# actual: true class labels
# dataset_name: label used in the plot title
plot_confusion_matrix <- function(pred, actual, dataset_name) {
  # table(Predicted = ..., Actual = ...) puts Predicted on dimension 1 and
  # Actual on dimension 2, so as.data.frame(as.table(...)) yields the columns
  # (Predicted, Actual, Freq) in that order.
  conf_mat <- table(Predicted = pred >= 0.5, Actual = actual)
  conf_mat_df <- as.data.frame(as.table(conf_mat))
  # BUG FIX: the original renamed the columns to c("Actual", "Predicted",
  # "Freq"), which swapped the two axes and transposed the plotted matrix.
  colnames(conf_mat_df) <- c("Predicted", "Actual", "Freq")

  p <- ggplot(data = conf_mat_df, aes(x = Predicted, y = Actual, fill = Freq)) +
    geom_tile(color = "white") +
    geom_text(aes(label = Freq), vjust = 1.5, color = "black", size = 5) +
    scale_fill_gradient(low = "white", high = "steelblue") +
    labs(title = paste("Confusion Matrix -", dataset_name, "Set"), x = "Predicted Class", y = "Actual Class") +
    theme_minimal() +
    theme(axis.text.x = element_text(angle = 45, hjust = 1), plot.title = element_text(hjust = 0.5))

  print(p)
}

# Call the function to plot confusion matrices for both training and validation sets
plot_confusion_matrix(trainPredict, trainData$X, "Training")
plot_confusion_matrix(validPredict, validData$X, "Validation")

# Compute binary-classification metrics from a 2x2 confusion matrix whose
# rows are actual class (negative level first) and columns are the
# thresholded prediction (FALSE first): [1,1]=TN, [1,2]=FP, [2,1]=FN, [2,2]=TP.
# AUC is computed from the continuous probabilities via pROC.
compute_metrics <- function(conf_mat, response, predictor) {
  tn <- conf_mat[1, 1]
  fp <- conf_mat[1, 2]
  fn <- conf_mat[2, 1]
  tp <- conf_mat[2, 2]

  accuracy <- (tn + tp) / sum(conf_mat)
  sensitivity <- tp / (tp + fn)      # recall on the positive class
  specificity <- tn / (tn + fp)
  precision <- tp / (fp + tp)
  f1 <- (2 * precision * sensitivity) / (precision + sensitivity)
  mcc <- (tp * tn - fp * fn) /
    sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))

  # Names double as the printed labels below.
  list(
    "Accuracy:" = accuracy,
    "Error Rate:" = 1 - accuracy,
    "Sensitivity:" = sensitivity,
    "Specificity:" = specificity,
    "Precision:" = precision,
    "F1 Score:" = f1,
    "MCC:" = mcc,
    "AUC:" = roc(response = response, predictor = predictor)$auc
  )
}

# Print a metrics list, one "Label: value" line per metric.
print_metrics <- function(title, metrics) {
  cat(title)
  for (label in names(metrics)) {
    cat(label, metrics[[label]], "\n")
  }
}

train_metrics <- compute_metrics(confMatTrain, trainData$X, trainPredict)
valid_metrics <- compute_metrics(confMatValid, validData$X, validPredict)

print_metrics("Training Metrics\n", train_metrics)
cat("\n")
print_metrics("Validation Metrics\n", valid_metrics)

在R语言中,Catboost模型得单独安装,下面是一些可以调整的关键参数:

①学习率 (learning_rate):控制每步模型更新的幅度。较小的学习率可以提高模型的训练稳定性和准确性,但可能需要更多的时间和更多的树来收敛。

②树的深度 (depth):决定了每棵树的最大深度。较深的树可以更好地捕捉数据中的复杂关系,但也可能导致过拟合。

③树的数量 (iterations):模型中树的总数。更多的树可以增加模型的复杂度和能力,但同样可能导致过拟合。

④L2 正则化系数 (l2_leaf_reg):在模型的损失函数中增加一个正则项,以减少模型复杂度和过拟合风险。

⑤边界计数 (border_count):用于数值特征分箱的边界数量,影响模型在连续特征上的决策边界。

⑥类别特征组合深度 (cat_features):CatBoost 优化了对类别特征的处理,可以指定在模型中使用的类别特征。

⑦子采样 (subsample):指定每棵树训练时从训练数据集中随机抽取的比例,有助于防止模型过拟合。

⑧列采样 (colsample_bylevel,colsample_bytree):控制每棵树或每个级别使用的特征的比例,可以增加模型的多样性,降低过拟合风险。

⑨最小数据在叶节点 (min_data_in_leaf):叶节点必需的最小样本数量,增加这个参数的值可以防止模型学习过于具体的模式,从而降低过拟合风险。

⑩评估指标 (eval_metric):用于训练过程中模型评估的性能指标。

结果输出(随便挑的):

从AUC来看,Catboost随便一跑,就跑出过拟合了,跟Xgboost差不多。

三、Catboost调参

随便设置了一下,效果不明显,给各位自行嗨皮:

# Load necessary libraries
library(caret)     # createDataPartition() for a stratified train/validation split
library(pROC)      # roc() / auc() for ROC analysis
library(ggplot2)   # plotting
library(catboost)  # CatBoost gradient-boosting model

# Assume 'data' is your dataframe containing the data
# Set seed to ensure reproducibility
set.seed(123)

# Convert the target variable to factor if not already, then recode the two
# levels to numeric 0/1 (the label CatBoost's Logloss objective expects)
data$X <- as.factor(data$X)
data$X <- as.numeric(data$X) - 1

# Split data into training and validation sets (80% training, 20% validation)
trainIndex <- createDataPartition(data$X, p = 0.8, list = FALSE)
trainData <- data[trainIndex, ]
validData <- data[-trainIndex, ]

# Prepare CatBoost pools (features = all columns except X, label = X)
trainPool <- catboost.load_pool(data = trainData[, -which(names(trainData) == "X")], label = trainData$X)
validPool <- catboost.load_pool(data = validData[, -which(names(validData) == "X")], label = validData$X)

# Define parameter grid
depths <- c(2, 4, 6)  # Reduced maximum depth
l2_leaf_regs <- c(1, 3, 5, 10, 20, 25)  # Increased maximum regularization
iterations <- c(500, 1000)  # Added higher iteration count for lower learning rates
learning_rates <- c(0.05, 0.1)  # Lower maximum learning rate
subsample <- 1.0  # NOTE(review): original comment claimed "use 80% of data", but the value is 1.0 (no subsampling) and this variable is never passed to the CatBoost params — remove it or wire it into `params`

# Best-so-far trackers for the grid search below
best_auc <- 0
best_params <- list()

# Grid search: train one model per parameter combination and keep the
# combination with the highest validation AUC.
for (depth in depths) {
  for (l2_leaf_reg in l2_leaf_regs) {
    for (iter in iterations) {
      for (learning_rate in learning_rates) {
        # Set parameters for this iteration
        params <- list(
          iterations = iter,
          depth = depth,
          learning_rate = learning_rate,
          l2_leaf_reg = l2_leaf_reg,
          loss_function = 'Logloss',
          eval_metric = 'AUC'
        )

        # Train the model (validPool is passed as test_pool so CatBoost can
        # report the eval metric during training)
        model <- catboost.train(learn_pool = trainPool, test_pool = validPool, params = params)

        # BUG FIX: request probabilities explicitly. Without prediction_type,
        # catboost.predict() returns raw scores (RawFormulaVal), so the
        # original's 0.5 threshold was applied on the wrong scale.
        validPredict <- catboost.predict(model, validPool, prediction_type = "Probability")

        # BUG FIX: score the grid on the AUC of the continuous probabilities.
        # The original thresholded to 0/1 first, which collapses the ROC curve
        # to a single operating point and makes AUC comparisons between
        # parameter sets nearly meaningless. Also, data$X was already recoded
        # to numeric 0/1 above, so it is used directly as the response (the
        # original's as.numeric(validData$X) - 1 shifted the labels to -1/0).
        validRoc <- roc(response = validData$X, predictor = validPredict)
        auc_score <- auc(validRoc)

        # Update best model if current AUC is better
        if (auc_score > best_auc) {
          best_auc <- auc_score
          best_params <- params
        }
      }
    }
  }
}

# Print the best AUC and corresponding parameters
print(paste("Best AUC:", best_auc))
print("Best Parameters:")
print(best_params)

# After parameter tuning, retrain the final model with the best parameters
model <- catboost.train(learn_pool = trainPool, params = best_params)

# Predict on the training and validation sets using the correct parameter.
# prediction_type = "Probability" returns P(class == 1) for the Logloss objective.
trainPredict <- catboost.predict(model, trainPool, prediction_type = "Probability")
validPredict <- catboost.predict(model, validPool, prediction_type = "Probability")

# Convert predictions to binary using 0.5 as threshold
trainPredictBinary <- ifelse(trainPredict > 0.5, 1, 0)
validPredictBinary <- ifelse(validPredict > 0.5, 1, 0)

# Compute ROC objects. BUG FIX: data$X was already recoded to numeric 0/1
# earlier in this script, so it is used directly as the response; the
# original subtracted 1 again, shifting the labels to -1/0 (pROC still
# treats them as two levels, so the AUC was unaffected, but the coding
# was wrong and misleading).
trainRoc <- roc(response = trainData$X, predictor = trainPredict)
validRoc <- roc(response = validData$X, predictor = validPredict)

# Plot ROC curves with ggplot2 (x = false-positive rate, y = true-positive rate)
trainRocPlot <- ggplot(data = data.frame(fpr = 1 - trainRoc$specificities, tpr = trainRoc$sensitivities), aes(x = fpr, y = tpr)) +
  geom_line(color = "blue") +
  # BUG FIX: the original wrapped geom_area() in aes(ifelse(fpr <= 1, fpr, NA)),
  # which re-mapped the x aesthetic (fpr is always <= 1, so the ifelse was a
  # no-op) and never shaded the region under the curve. Inheriting the
  # plot-level aes(x, y) shades the area under the ROC curve correctly.
  geom_area(fill = "blue", alpha = 0.2) +
  geom_abline(slope = 1, intercept = 0, linetype = "dashed", color = "black") +  # chance diagonal
  ggtitle("Training ROC Curve") +
  xlab("False Positive Rate") +
  ylab("True Positive Rate") +
  annotate("text", x = 0.5, y = 0.1, label = paste("Training AUC =", round(auc(trainRoc), 2)), hjust = 0.5, color = "blue")

validRocPlot <- ggplot(data = data.frame(fpr = 1 - validRoc$specificities, tpr = validRoc$sensitivities), aes(x = fpr, y = tpr)) +
  geom_line(color = "red") +
  # Same fix as above: shade under the curve via the inherited aes.
  geom_area(fill = "red", alpha = 0.2) +
  geom_abline(slope = 1, intercept = 0, linetype = "dashed", color = "black") +
  ggtitle("Validation ROC Curve") +
  xlab("False Positive Rate") +
  ylab("True Positive Rate") +
  annotate("text", x = 0.5, y = 0.2, label = paste("Validation AUC =", round(auc(validRoc), 2)), hjust = 0.5, color = "red")

# Display the plots
print(trainRocPlot)
print(validRocPlot)


# Calculate confusion matrices based on 0.5 cutoff for probability.
# Rows = actual class, columns = thresholded prediction (FALSE/TRUE).
confMatTrain <- table(trainData$X, trainPredict >= 0.5)
confMatValid <- table(validData$X, validPredict >= 0.5)

# Render a confusion matrix produced by table(actual, predicted) as a
# ggplot2 heat map. conf_mat has actual on rows and predicted on columns;
# dataset_name is used in the plot title.
plot_confusion_matrix <- function(conf_mat, dataset_name) {
  # Reshape the contingency table into long (tidy) format for ggplot.
  cm_long <- as.data.frame(as.table(conf_mat))
  colnames(cm_long) <- c("Actual", "Predicted", "Freq")

  plot_title <- paste("Confusion Matrix -", dataset_name, "Set")
  heatmap_plot <- ggplot(cm_long, aes(x = Predicted, y = Actual, fill = Freq)) +
    geom_tile(color = "white") +
    geom_text(aes(label = Freq), vjust = 1.5, color = "black", size = 5) +
    scale_fill_gradient(low = "white", high = "steelblue") +
    labs(title = plot_title, x = "Predicted Class", y = "Actual Class") +
    theme_minimal() +
    theme(
      axis.text.x = element_text(angle = 45, hjust = 1),
      plot.title = element_text(hjust = 0.5)
    )

  print(heatmap_plot)
}

# Draw confusion-matrix heat maps for both data splits.
plot_confusion_matrix(confMatTrain, "Training")
plot_confusion_matrix(confMatValid, "Validation")

# Extract values for calculations. With rows = actual class (negative level
# first) and columns = thresholded prediction (FALSE first):
# a = [1,1] true negatives, b = [1,2] false positives,
# c = [2,1] false negatives, d = [2,2] true positives.
# NOTE(review): assumes both classes and both prediction outcomes occur;
# otherwise table() drops the missing row/column and this indexing breaks.
a_train <- confMatTrain[1, 1]
b_train <- confMatTrain[1, 2]
c_train <- confMatTrain[2, 1]
d_train <- confMatTrain[2, 2]

a_valid <- confMatValid[1, 1]
b_valid <- confMatValid[1, 2]
c_valid <- confMatValid[2, 1]
d_valid <- confMatValid[2, 2]

# Training Set Metrics
acc_train <- (a_train + d_train) / sum(confMatTrain)  # accuracy = (TN + TP) / N
error_rate_train <- 1 - acc_train
sen_train <- d_train / (d_train + c_train)            # sensitivity (recall) = TP / (TP + FN)
sep_train <- a_train / (a_train + b_train)            # specificity = TN / (TN + FP)
precision_train <- d_train / (b_train + d_train)      # precision = TP / (TP + FP)
F1_train <- (2 * precision_train * sen_train) / (precision_train + sen_train)
# Matthews correlation coefficient
MCC_train <- (d_train * a_train - b_train * c_train) / sqrt((d_train + b_train) * (d_train + c_train) * (a_train + b_train) * (a_train + c_train))
auc_train <- roc(response = trainData$X, predictor = trainPredict)$auc  # AUC from continuous probabilities

# Validation Set Metrics (same formulas as above)
acc_valid <- (a_valid + d_valid) / sum(confMatValid)
error_rate_valid <- 1 - acc_valid
sen_valid <- d_valid / (d_valid + c_valid)
sep_valid <- a_valid / (a_valid + b_valid)
precision_valid <- d_valid / (b_valid + d_valid)
F1_valid <- (2 * precision_valid * sen_valid) / (precision_valid + sen_valid)
MCC_valid <- (d_valid * a_valid - b_valid * c_valid) / sqrt((d_valid + b_valid) * (d_valid + c_valid) * (a_valid + b_valid) * (a_valid + c_valid))
auc_valid <- roc(response = validData$X, predictor = validPredict)$auc

# Print Metrics, one "Label: value" line per metric
cat("Training Metrics\n")
cat("Accuracy:", acc_train, "\n")
cat("Error Rate:", error_rate_train, "\n")
cat("Sensitivity:", sen_train, "\n")
cat("Specificity:", sep_train, "\n")
cat("Precision:", precision_train, "\n")
cat("F1 Score:", F1_train, "\n")
cat("MCC:", MCC_train, "\n")
cat("AUC:", auc_train, "\n\n")

cat("Validation Metrics\n")
cat("Accuracy:", acc_valid, "\n")
cat("Error Rate:", error_rate_valid, "\n")
cat("Sensitivity:", sen_valid, "\n")
cat("Specificity:", sep_valid, "\n")
cat("Precision:", precision_valid, "\n")
cat("F1 Score:", F1_valid, "\n")
cat("MCC:", MCC_valid, "\n")
cat("AUC:", auc_valid, "\n")

结果输出:

提供个样本代码吧,我不调了。

四、最后

至于怎么安装,自学了哈。

数据嘛:

链接:https://pan.baidu.com/s/1rEf6JZyzA1ia5exoq5OF7g?pwd=x8xm

提取码:x8xm

发布者:admin,转转请注明出处:http://www.yc00.com/web/1754769683a5199992.html

相关推荐

  • 如何使用ChatGPT写书

    开始您的创作之旅,让 ChatGPT成为您的创意盟友。这款 AI 工具擅长创造创意、协助概述和制作材料以及提供编辑建议。无缝互动,讨论故事转折、调整角色弧线并增强您的叙述。但是……

    1月前
    100
  • ChatGPT详解

    ChatGPT是一款由OpenAI研发和维护的先进的自然语言处理模型(NLP),全名为Chat Generative Pre-trained Transformer,于2022年11月30日发布。以下是对ChatGPT的详细介绍: ###

    1月前
    180
  • ChatGPT 的应用案例

    ChatGPT 在客服中的应用 随着人工智能技术的不断发展,ChatGPT(基于对话生成的预训练模型)在客服中的应用越来越广泛。ChatGPT可以帮助企业提高客户服务质量,减少客服人员的工作量,增加客户满意度。本文将举例说明ChatGPT

    1月前
    260
  • 【大模型】ChatGPT 打造个人专属GPTs助手使用详解

    目录 一、前言 二、GPTs介绍 2.1 GPTs是什么 2.2 GPTs工作原理 2.3 GPTs 主要功能 2.4 GPTs 应用场景 2.5 GPTs 优缺点 三、GPTs 创建个人专属应用操作过程 3.1 内置GPT

    1月前
    160
  • 借助ChatGPT实现图像生成的多元途径

    ChatGPT 自身没办法直接生成图像或者作图,但咱们可以通过与其他图形生成工具整合,或是编写代码来实现图像生成。毕竟 ChatGPT 作为文本生成模型,主要 “功夫” 都在处理和生成文本上。不过别担心,结合其他工具和方法,照样能让图像生成

    1月前
    190
  • BIOS详解---ChatGPT o3作答

    从 Legacy BIOS → UEFI 固件 的演进&#xff0c;到 2024-2025 年加进 AI 自动超频、USB 免 CPU 刷写、64 MB 大容量 ROM 等新花样&#xff0c;主板 BIOS 已不仅是“开

    1月前
    240
  • 如何开发一款 ChatGPT 代码解释器插件

    让没有 ChatGPT Plus 会员的小伙伴也能用上低配版代码解释器&#xff08;Code Interpreter&#xff09;功能&#xff0c;秘诀就是自己造轮子&#xff0c;话不多少&#

    1月前
    190
  • ChatGPT代码解释器使用指南:AI编程助手

    ChatGPT代码解释器使用指南:AI编程助手 关键词:ChatGPT、代码解释器、AI编程助手、自然语言编程、代码调试、编程学习、自动化编程 摘要:本文将深入探讨ChatGPT代码解释器的使用方法和原理,通过生动形象的比喻和详细的代码示例

    1月前
    150
  • 【花雕学编程】ESP32 ChatGPT之基础 ChatGPT 请求

    Arduino是一个开放源码的电子原型平台&#xff0c;它可以让你用简单的硬件和软件来创建各种互动的项目。Arduino的核心是一个微控制器板&#xff0c;它可以通过一系列的引脚来连接各种传感器、执行器、显示器等外部设备

    1月前
    160
  • chatgpt How to call functions with chat models

    https://github.com/openai/openai-cookbook/blob/main/examples/How_to_call_functions_with_chat_models.ipynb This notebook cov

    1月前
    250
  • 基于LM Studio + LLaMA3 建立本地化的ChatGPT

    4月19日,Facebook母公司Meta重磅推出了Llama3。即便大家现在对于大厂和巨头频繁迭代AI模型的行为已经见怪不怪,Meta的Llama3仍旧显得与众不同,因为这是迄今最强大的开源AI模型。LLaMA模型通常采用了类似于GPT(

    1月前
    150
  • 有形皆误,实用者存---ChatGPT o3作答

    “All models are wrong, but some are useful.”——George E. P. Box 出处 统计学家 George E. P. Box 在 1976 年《Journal of the American

    1月前
    120
  • 不止ChatGPT!2024年最值得试的20款AI写作工具!

    在当今信息爆炸的时代&#xff0c;无论是在新媒体行业中撰写文案&#xff0c;还是在学术领域编写科研论文&#xff0c;甚至是日常的工作总结&#xff0c;写作需求无处不在。尤其是在面对紧急任务时&

    1月前
    230
  • 1分钟教你找到好用的国内chatGPT网站!

    首先国内的chatgpt基本上都是基于openAI官网chatGPT 的API服务进行开发的。 通过将API接口集成到网站当中,通过API调用到openAI的GPT语言模型,具体流程就是:当用户发送问题之后,会通过API秘钥传递给open

    1月前
    260
  • 3个好用免费的ChatGPT网站

    AI 大模型的出现给时代带来了深远的影响: 改变了产业格局:AI 大模型的发展推动了人工智能技术在各行业的广泛应用,改变了传统产业的运作方式,促进了新兴产业的崛起,如智能驾驶、医疗健康、金融科技等。 提升了科学研究水平:AI 大模型的应用加

    1月前
    220
  • ChatGPT将引发大量而普遍的网络安全隐患

    ChatGPT是一个基于人工智能的语言生成模型&#xff0c;它可以在任何给定的时间&#xff0c;使用自然语言生成技术&#xff0c;生成文本、对话和文章。它不仅可以被用来编写文本&#xff0c;还可以用来

    1月前
    200
  • ChatGPT网站小蜜蜂AI更新了

    ChatGPT网站小蜜蜂AI更新了 前阶段郭震兄弟刚开发小蜜蜂AI网站的的时候,写了一篇关于ChatGPT的网站小蜜蜂AI的博文[https:blog.csdnweixin_41905135articledetails1352

    1月前
    170
  • ChatGPT Agent深度解析:告别单纯问答,一个指令搞定复杂任务?

    名人说:博观而约取,厚积而薄发。——苏轼《稼说送张琥》 创作者:Code_流苏(CSDN)(一个喜欢古诗词和编程的Coder

    1月前
    170
  • 分享全网最全的ChatGPT资料

    ❝ &#x1f4a1; 全网最全的ChatGPT资料尽在 顶尖架构师栈 ❝ 图标说明 ✅&#xff1a;直接使用 &#x1f510;&#xff1a;需要注册登录 &#x1f9d7;‍♀️&#x

    1月前
    190
  • 100多个ChatGPT指令提示词分享

    当前&#xff0c;ChatGPT几乎已经占领了整个互联网。全球范围内成千上万的用户正使用这款人工智能驱动的聊天机器人来满足各种需求。然而&#xff0c;并不是每个人都知道如何充分有效地利用ChatGPT的潜力。其实有许多令

    1月前
    210

发表回复

评论列表(0条)

  • 暂无评论

联系我们

400-800-8888

在线咨询: QQ交谈

邮件:admin@example.com

工作时间:周一至周五,9:30-18:30,节假日休息

关注微信