Project1
标题: CSV转Json实验日志: 设定表格转游戏数据 (新增excel转换) [打印本页]
作者: 沉滞的剑 时间: 2020-6-8 02:29
标题: CSV转Json实验日志: 设定表格转游戏数据 (新增excel转换)
本帖最后由 沉滞的剑 于 2020-6-8 13:30 编辑
继续搞事情....
设定用表格很方便, 但是游戏中还是JSON结构清楚
写一遍设定再录入一遍数据实在是太蠢了, 所以需要搞一个脚本来辅助工作
我有很多张CSV表格, 我要把它们每一张都输出为Json格式, 并且还想支持一些简单的不定长的数据结构
例子1 - 最简单的情形
我需要一些文本描述:
id,name,note
hp,生命,"人物能够承受的伤害, 初始为12点"
belly,饱腹,"人物能承受的饥饿, 初始为15点, 饱腹不足时将由生命值承受饥饿"
exp,经验,"用来提升最大生命或者属性的资源. 每100点经验可以换取1点最大生命或1点属性点, 每次换取属性点的花费增加50."
gp,金币,"用来交易物品的资源"
attr,属性,"表示角色的不同能力的强度, 每一项最多10点"
str,力量,"人物属性, 每点力量提高1点攻击, 初始为1点"
int,智力,"人物属性, 每点智力获得额外5点战斗经验, 初始为1点"
spd,速度,"人物属性, 战斗中速度高的角色先出手, 初始为1点"
weapon,武器,"提供额外攻击的装备"
armor,防具,"提供额外防御的装备"
accessory,饰品,"提供额外效果的装备"
atk,攻击伤害,"攻击所能造成的基本伤害"
def,伤害抵抗,"能够抵消一次攻击的攻击伤害总量, 最多降低伤害到1点"
CSV要求每一列用逗号分隔, 如果文本中有逗号需要用双引号将整个文本括起来
最后输出的结果是:
[
{ "id": "hp", "name": "生命", "note": "人物能够承受的伤害, 初始为12点" },
{ "id": "belly", "name": "饱腹", "note": "人物能承受的饥饿, 初始为15点, 饱腹不足时将由生命值承受饥饿" },
{
"id": "exp",
"name": "经验",
"note": "用来提升最大生命或者属性的资源. 每100点经验可以换取1点最大生命或1点属性点, 每次换取属性点的花费增加50."
},
{ "id": "gp", "name": "金币", "note": "用来交易物品的资源" },
{ "id": "attr", "name": "属性", "note": "表示角色的不同能力的强度, 每一项最多10点" },
{ "id": "str", "name": "力量", "note": "人物属性, 每点力量提高1点攻击, 初始为1点" },
{ "id": "int", "name": "智力", "note": "人物属性, 每点智力获得额外5点战斗经验, 初始为1点" },
{ "id": "spd", "name": "速度", "note": "人物属性, 战斗中速度高的角色先出手, 初始为1点" },
{ "id": "weapon", "name": "武器", "note": "提供额外攻击的装备" },
{ "id": "armor", "name": "防具", "note": "提供额外防御的装备" },
{ "id": "accessory", "name": "饰品", "note": "提供额外效果的装备" },
{ "id": "atk", "name": "攻击伤害", "note": "攻击所能造成的基本伤害" },
{ "id": "def", "name": "伤害抵抗", "note": "能够抵消一次攻击的攻击伤害总量, 最多降低伤害到1点" }
]
例子2 - 不定长的数据
我需要定义一种'效果'函数, 它接收不定长数量的参数, 我希望能转换为参数列表并附带描述:
id,name,type,params,note
atk,伤害,always,dmg:伤害加成,攻击伤害提高$1
atkAttr,属性加成伤害,always,attr:属性类型;dmg:每点属性提供的伤害加成,每点#1提高$2点攻击伤害
最后输出的结果是:
[
{
"id": "atk",
"name": "伤害",
"type": "always",
"params": [{ "name": "dmg", "note": "伤害加成" }],
"note": "攻击伤害提高$1"
},
{
"id": "atkAttr",
"name": "属性加成伤害",
"type": "always",
"params": [
{ "name": "attr", "note": "属性类型" },
{ "name": "dmg", "note": "每点属性提供的伤害加成" }
],
"note": "每点#1提高$2点攻击伤害"
}
]
我约定xxx:yyy的结构代表名称为xxx, 描述为yyy
例子3 - 另一个不定长的数据
我需要让其他数据使用我之前定义的效果函数, 而且让它看上去是另一种语法
id,name,effects,type,value,note
shortsword,短剑,"atk(5)",dagger,10,"轻便灵活的护身武器, 新人冒险家用它来杀死野狼."
sword,长剑,"atk(8)",sword,20,"从雇佣兵到骑士团, 人人都爱用的可靠武器."
lance,骑枪,"atk(5);atkAttr(spd,3)",polearms,80,"冲锋! 骑枪之下众生平等."
转换后的结果为:
[
{
"id": "shortsword",
"name": "短剑",
"effects": [{ "id": "atk", "params": ["5"] }],
"type": "dagger",
"value": "10",
"note": "轻便灵活的护身武器, 新人冒险家用它来杀死野狼."
},
{
"id": "sword",
"name": "长剑",
"effects": [{ "id": "atk", "params": ["8"] }],
"type": "sword",
"value": "20",
"note": "从雇佣兵到骑士团, 人人都爱用的可靠武器."
},
{
"id": "lance",
"name": "骑枪",
"effects": [
{ "id": "atk", "params": ["5"] },
{ "id": "atkAttr", "params": ["spd", "3"] }
],
"type": "polearms",
"value": "80",
"note": "冲锋! 骑枪之下众生平等."
}
]
将多个效果和它的参数解析出来, 同时保证原数据还具有一定的可读性.
最后脚本是用NodeJS写的, 其实也可以考虑用Python, 如果用pandas一定会实现更加复杂的效果, 至少不需要手写csv的解析了
const fs = require("fs");
const path = require("path");
const _ = require("lodash");
/**
 * Parse a single CSV cell into a JSON-ready value based on its column name.
 *
 * Recognised cell formats (checked in order):
 * - columns named id/name/note are always kept as raw strings
 * - "xxx:yyy;aaa:bbb" -> [{ name: "xxx", note: "yyy" }, ...]   (param definitions)
 * - "fn(a,b);g(c)"    -> [{ id: "fn", params: ["a", "b"] }, ...] (effect calls)
 * - "[a,b,c]"         -> ["a", "b", "c"]
 * - anything else     -> the raw string
 *
 * @param {string} name - column header for this cell
 * @param {string} col - raw cell text (surrounding quotes already stripped)
 * @returns {string|Array} parsed value
 */
const parseCol = (name, col) => {
  // id/name/note columns are free text; never apply structured parsing to them.
  if (["id", "name", "note"].includes(name)) return col;
  const list = col.split(";").filter(Boolean);
  // Guard against an empty cell: avoid regex-testing against "undefined".
  const head = list[0] || "";
  if (/^.+:.+$/.test(head)) {
    // "name:note" pairs separated by ';'.
    return list.map((item) => {
      const [pairName, note] = item.split(":");
      return { name: pairName, note };
    });
  }
  if (/^.+[(].*[)]$/.test(head)) {
    // "effect(arg1,arg2)" calls separated by ';'.
    // Simple non-nested regex (the original `(.+[,]?)+` was redundant and
    // ReDoS-prone); `.*` also accepts zero-argument calls like "heal()".
    return list.map((item) => {
      const [, id, params] = item.match(/^(.+)[(](.*)[)]$/);
      return { id, params: params.split(",").filter(Boolean) };
    });
  }
  if (/^\[.+\]$/.test(col)) {
    // "[a,b,c]" list literal — strip the brackets before splitting
    // (previously they leaked into the first and last elements).
    return col.slice(1, -1).split(",");
  }
  return col;
};
/**
 * Convert one CSV file under `src` into a JSON file with the same base name
 * under `dst`. The first CSV row is treated as the header; each remaining row
 * becomes one object whose values are run through parseCol.
 *
 * @param {string} src - source directory containing the CSV file
 * @param {string} dst - destination directory for the JSON file
 * @param {string} filename - CSV file name, e.g. "items.csv"
 * @returns {Promise<void>}
 */
const parseFile = async (src, dst, filename) => {
  const data = await fs.promises.readFile(path.resolve(src, filename), "utf-8");
  // Split one CSV line on commas while honouring double-quoted fields.
  const parseLine = (line) => {
    const unquote = (s) => s.replace(/^["](.*)["]$/, "$1");
    const fields = [];
    let outsideQuotes = true;
    let fieldStart = 0;
    line.split("").forEach((ch, i) => {
      if (ch === '"') {
        outsideQuotes = !outsideQuotes;
        return;
      }
      if (ch === "," && outsideQuotes) {
        fields.push(unquote(line.substring(fieldStart, i)));
        fieldStart = i + 1;
      }
    });
    fields.push(unquote(line.substring(fieldStart)));
    return fields;
  };
  // Accept both CRLF and LF endings (previously only "\r\n" worked), and drop
  // blank lines so a trailing newline no longer produces a bogus empty record.
  const lines = data
    .split(/\r?\n/)
    .filter((line) => line.trim() !== "")
    .map(parseLine);
  const head = lines.shift();
  const obj = lines.map((line) =>
    Object.fromEntries(line.map((col, index) => [head[index], parseCol(head[index], col)]))
  );
  await fs.promises.writeFile(path.resolve(dst, filename.replace(/csv$/, "json")), JSON.stringify(obj));
};
// Entry point: convert every file under `src` into JSON under `dst`.
// Only CSV files should live in `src` (no other file types or subfolders).
const src = "./data/Cards/CSV";
const dst = "./data/Cards/JSON";
const filenames = fs.readdirSync(src, "utf-8");
// parseFile is async — collect the promises and surface any rejection
// instead of leaving them floating with silently swallowed errors.
Promise.all(filenames.map((item) => parseFile(src, dst, item))).catch((err) => {
  console.error("CSV -> JSON conversion failed:", err);
  process.exitCode = 1;
});
const fs = require("fs");
const path = require("path");
const _ = require("lodash");
/**
 * Parse a single CSV cell into a JSON-ready value based on its column name.
 *
 * Recognised cell formats (checked in order):
 * - columns named id/name/note are always kept as raw strings
 * - "xxx:yyy;aaa:bbb" -> [{ name: "xxx", note: "yyy" }, ...]   (param definitions)
 * - "fn(a,b);g(c)"    -> [{ id: "fn", params: ["a", "b"] }, ...] (effect calls)
 * - "[a,b,c]"         -> ["a", "b", "c"]
 * - anything else     -> the raw string
 *
 * @param {string} name - column header for this cell
 * @param {string} col - raw cell text (surrounding quotes already stripped)
 * @returns {string|Array} parsed value
 */
const parseCol = (name, col) => {
  // id/name/note columns are free text; never apply structured parsing to them.
  if (["id", "name", "note"].includes(name)) return col;
  const list = col.split(";").filter(Boolean);
  // Guard against an empty cell: avoid regex-testing against "undefined".
  const head = list[0] || "";
  if (/^.+:.+$/.test(head)) {
    // "name:note" pairs separated by ';'.
    return list.map((item) => {
      const [pairName, note] = item.split(":");
      return { name: pairName, note };
    });
  }
  if (/^.+[(].*[)]$/.test(head)) {
    // "effect(arg1,arg2)" calls separated by ';'.
    // Simple non-nested regex (the original `(.+[,]?)+` was redundant and
    // ReDoS-prone); `.*` also accepts zero-argument calls like "heal()".
    return list.map((item) => {
      const [, id, params] = item.match(/^(.+)[(](.*)[)]$/);
      return { id, params: params.split(",").filter(Boolean) };
    });
  }
  if (/^\[.+\]$/.test(col)) {
    // "[a,b,c]" list literal — strip the brackets before splitting
    // (previously they leaked into the first and last elements).
    return col.slice(1, -1).split(",");
  }
  return col;
};
/**
 * Convert one CSV file under `src` into a JSON file with the same base name
 * under `dst`. The first CSV row is treated as the header; each remaining row
 * becomes one object whose values are run through parseCol.
 *
 * @param {string} src - source directory containing the CSV file
 * @param {string} dst - destination directory for the JSON file
 * @param {string} filename - CSV file name, e.g. "items.csv"
 * @returns {Promise<void>}
 */
const parseFile = async (src, dst, filename) => {
  const data = await fs.promises.readFile(path.resolve(src, filename), "utf-8");
  // Split one CSV line on commas while honouring double-quoted fields.
  const parseLine = (line) => {
    const unquote = (s) => s.replace(/^["](.*)["]$/, "$1");
    const fields = [];
    let outsideQuotes = true;
    let fieldStart = 0;
    line.split("").forEach((ch, i) => {
      if (ch === '"') {
        outsideQuotes = !outsideQuotes;
        return;
      }
      if (ch === "," && outsideQuotes) {
        fields.push(unquote(line.substring(fieldStart, i)));
        fieldStart = i + 1;
      }
    });
    fields.push(unquote(line.substring(fieldStart)));
    return fields;
  };
  // Accept both CRLF and LF endings (previously only "\r\n" worked), and drop
  // blank lines so a trailing newline no longer produces a bogus empty record.
  const lines = data
    .split(/\r?\n/)
    .filter((line) => line.trim() !== "")
    .map(parseLine);
  const head = lines.shift();
  const obj = lines.map((line) =>
    Object.fromEntries(line.map((col, index) => [head[index], parseCol(head[index], col)]))
  );
  await fs.promises.writeFile(path.resolve(dst, filename.replace(/csv$/, "json")), JSON.stringify(obj));
};
// Entry point: convert every file under `src` into JSON under `dst`.
// Only CSV files should live in `src` (no other file types or subfolders).
const src = "./data/Cards/CSV";
const dst = "./data/Cards/JSON";
const filenames = fs.readdirSync(src, "utf-8");
// parseFile is async — collect the promises and surface any rejection
// instead of leaving them floating with silently swallowed errors.
Promise.all(filenames.map((item) => parseFile(src, dst, item))).catch((err) => {
  console.error("CSV -> JSON conversion failed:", err);
  process.exitCode = 1;
});
新增用pandas写的直接将excel的所有sheets转换为JSON
import pandas as pd
import numpy as np
import json
import re
# Input workbook and output directory. The trailing '/' on json_path is
# required by the string concatenation used when writing each sheet's JSON.
excel_path = 'data/Cards/Excel/cards.xlsx'
json_path = 'data/Cards/JSON/'
def parseCol(col):
    """Parse a single spreadsheet cell into a JSON-ready value.

    Recognised cell formats (checked in order):
      * "fn(a,b);g(c)"    -> [{'name': 'fn', 'params': ['a', 'b']}, ...]
      * "xxx:yyy;aaa:bbb" -> [{'name': 'xxx', 'note': 'yyy'}, ...]
    Anything else is returned unchanged as a string (non-string cells such as
    numbers or NaN coming from pandas are stringified first).

    NOTE(review): unlike the JS version, there is no column-name guard here,
    so a free-text note containing ':' would be mis-parsed as a pair list —
    presumably the sheets avoid such notes; verify against the workbook.
    """
    patterns = {
        # Effect calls: one or more "name(arg1,arg2)" groups separated by ';'.
        'func': {
            'pattern': r'(.+?)[(](.*?)[)];?',
            'method': lambda matches: [
                {
                    'name': name,
                    # Drop empty params so "fn()" yields an empty list.
                    # (Was `y is not ''`: identity comparison on strings is
                    # implementation-dependent and warns on modern Python.)
                    'params': [p for p in params.split(',') if p != ''],
                }
                for name, params in matches
            ],
        },
        # Parameter definitions: "name:note" pairs separated by ';'.
        'params': {
            'pattern': r'([^;]+?)[:]([^;]+)[;]?',
            'method': lambda matches: [
                {'name': name, 'note': note}
                for name, note in matches
            ],
        },
    }
    text = str(col)
    for spec in patterns.values():
        matches = re.findall(spec['pattern'], text)
        if matches:
            return spec['method'](matches)
    return text
# Convert every sheet of the workbook into "<sheet name>.json".
# sheet_name=None makes read_excel return an ordered dict of all sheets.
sheets = pd.read_excel(excel_path, sheet_name=None)
for sheet_name, sheet in sheets.items():
    # Parse each cell, then rebuild a frame with the original shape/headers.
    parsed = np.array(
        [parseCol(cell) for cell in sheet.values.flatten()],
        dtype="object",
    ).reshape(sheet.shape)
    # orient must be "records" — the abbreviated "record" was deprecated in
    # pandas 1.4 and raises ValueError since pandas 2.0.
    data = pd.DataFrame(parsed, columns=sheet.columns).to_dict(orient="records")
    with open(json_path + sheet_name + '.json', mode='w', encoding='utf-8') as file:
        json.dump(data, file, ensure_ascii=False)
import pandas as pd
import numpy as np
import json
import re
# Input workbook and output directory. The trailing '/' on json_path is
# required by the string concatenation used when writing each sheet's JSON.
excel_path = 'data/Cards/Excel/cards.xlsx'
json_path = 'data/Cards/JSON/'
def parseCol(col):
    """Parse a single spreadsheet cell into a JSON-ready value.

    Recognised cell formats (checked in order):
      * "fn(a,b);g(c)"    -> [{'name': 'fn', 'params': ['a', 'b']}, ...]
      * "xxx:yyy;aaa:bbb" -> [{'name': 'xxx', 'note': 'yyy'}, ...]
    Anything else is returned unchanged as a string (non-string cells such as
    numbers or NaN coming from pandas are stringified first).

    NOTE(review): unlike the JS version, there is no column-name guard here,
    so a free-text note containing ':' would be mis-parsed as a pair list —
    presumably the sheets avoid such notes; verify against the workbook.
    """
    patterns = {
        # Effect calls: one or more "name(arg1,arg2)" groups separated by ';'.
        'func': {
            'pattern': r'(.+?)[(](.*?)[)];?',
            'method': lambda matches: [
                {
                    'name': name,
                    # Drop empty params so "fn()" yields an empty list.
                    # (Was `y is not ''`: identity comparison on strings is
                    # implementation-dependent and warns on modern Python.)
                    'params': [p for p in params.split(',') if p != ''],
                }
                for name, params in matches
            ],
        },
        # Parameter definitions: "name:note" pairs separated by ';'.
        'params': {
            'pattern': r'([^;]+?)[:]([^;]+)[;]?',
            'method': lambda matches: [
                {'name': name, 'note': note}
                for name, note in matches
            ],
        },
    }
    text = str(col)
    for spec in patterns.values():
        matches = re.findall(spec['pattern'], text)
        if matches:
            return spec['method'](matches)
    return text
# Convert every sheet of the workbook into "<sheet name>.json".
# sheet_name=None makes read_excel return an ordered dict of all sheets.
sheets = pd.read_excel(excel_path, sheet_name=None)
for sheet_name, sheet in sheets.items():
    # Parse each cell, then rebuild a frame with the original shape/headers.
    parsed = np.array(
        [parseCol(cell) for cell in sheet.values.flatten()],
        dtype="object",
    ).reshape(sheet.shape)
    # orient must be "records" — the abbreviated "record" was deprecated in
    # pandas 1.4 and raises ValueError since pandas 2.0.
    data = pd.DataFrame(parsed, columns=sheet.columns).to_dict(orient="records")
    with open(json_path + sheet_name + '.json', mode='w', encoding='utf-8') as file:
        json.dump(data, file, ensure_ascii=False)
作者: play337 时间: 2020-6-8 06:49
本版块需要密码,您必须在下面输入正确的密码才能浏览这个版块
作者: 574656549 时间: 2020-7-19 17:30
提示: 作者被禁止或删除 内容自动屏蔽
欢迎光临 Project1 (https://rpg.blue/) |
Powered by Discuz! X3.1 |