Perf: read file worker (#1337)
* perf: read file worker * fix: Http node url input * fix: htm2md * fix: html2md * fix: ts * perf: Problem classification increases the matching order * feat: tool response answer
This commit is contained in:
21
packages/service/worker/file/extension/csv.ts
Normal file
21
packages/service/worker/file/extension/csv.ts
Normal file
@@ -0,0 +1,21 @@
|
||||
import Papa from 'papaparse';
|
||||
import { ReadRawTextByBuffer, ReadFileResponse } from '../type';
|
||||
import { readFileRawText } from './rawText';
|
||||
|
||||
// 加载源文件内容
|
||||
export const readCsvRawText = async (params: ReadRawTextByBuffer): Promise<ReadFileResponse> => {
|
||||
const { rawText } = readFileRawText(params);
|
||||
|
||||
const csvArr = Papa.parse(rawText).data as string[][];
|
||||
|
||||
const header = csvArr[0];
|
||||
|
||||
const formatText = header
|
||||
? csvArr.map((item) => item.map((item, i) => `${header[i]}:${item}`).join('\n')).join('\n')
|
||||
: '';
|
||||
|
||||
return {
|
||||
rawText,
|
||||
formatText
|
||||
};
|
||||
};
|
||||
23
packages/service/worker/file/extension/docx.ts
Normal file
23
packages/service/worker/file/extension/docx.ts
Normal file
@@ -0,0 +1,23 @@
|
||||
import mammoth from 'mammoth';
|
||||
import { ReadRawTextByBuffer, ReadFileResponse } from '../type';
|
||||
import { html2md } from '../../htmlStr2Md/utils';
|
||||
|
||||
/**
|
||||
* read docx to markdown
|
||||
*/
|
||||
export const readDocsFile = async ({ buffer }: ReadRawTextByBuffer): Promise<ReadFileResponse> => {
|
||||
try {
|
||||
const { value: html } = await mammoth.convertToHtml({
|
||||
buffer
|
||||
});
|
||||
|
||||
const rawText = html2md(html);
|
||||
|
||||
return {
|
||||
rawText
|
||||
};
|
||||
} catch (error) {
|
||||
console.log('error doc read:', error);
|
||||
return Promise.reject('Can not read doc file, please convert to PDF');
|
||||
}
|
||||
};
|
||||
13
packages/service/worker/file/extension/html.ts
Normal file
13
packages/service/worker/file/extension/html.ts
Normal file
@@ -0,0 +1,13 @@
|
||||
import { ReadRawTextByBuffer, ReadFileResponse } from '../type';
|
||||
import { readFileRawText } from './rawText';
|
||||
import { html2md } from '../../htmlStr2Md/utils';
|
||||
|
||||
export const readHtmlRawText = async (params: ReadRawTextByBuffer): Promise<ReadFileResponse> => {
|
||||
const { rawText: html } = readFileRawText(params);
|
||||
|
||||
const rawText = html2md(html);
|
||||
|
||||
return {
|
||||
rawText
|
||||
};
|
||||
};
|
||||
68
packages/service/worker/file/extension/pdf.ts
Normal file
68
packages/service/worker/file/extension/pdf.ts
Normal file
@@ -0,0 +1,68 @@
|
||||
import * as pdfjs from 'pdfjs-dist/legacy/build/pdf.mjs';
|
||||
// @ts-ignore
|
||||
import('pdfjs-dist/legacy/build/pdf.worker.min.mjs');
|
||||
import { ReadRawTextByBuffer, ReadFileResponse } from '../type';
|
||||
|
||||
type TokenType = {
|
||||
str: string;
|
||||
dir: string;
|
||||
width: number;
|
||||
height: number;
|
||||
transform: number[];
|
||||
fontName: string;
|
||||
hasEOL: boolean;
|
||||
};
|
||||
|
||||
export const readPdfFile = async ({ buffer }: ReadRawTextByBuffer): Promise<ReadFileResponse> => {
|
||||
const readPDFPage = async (doc: any, pageNo: number) => {
|
||||
const page = await doc.getPage(pageNo);
|
||||
const tokenizedText = await page.getTextContent();
|
||||
|
||||
const viewport = page.getViewport({ scale: 1 });
|
||||
const pageHeight = viewport.height;
|
||||
const headerThreshold = pageHeight * 0.95;
|
||||
const footerThreshold = pageHeight * 0.05;
|
||||
|
||||
const pageTexts: TokenType[] = tokenizedText.items.filter((token: TokenType) => {
|
||||
return (
|
||||
!token.transform ||
|
||||
(token.transform[5] < headerThreshold && token.transform[5] > footerThreshold)
|
||||
);
|
||||
});
|
||||
|
||||
// concat empty string 'hasEOL'
|
||||
for (let i = 0; i < pageTexts.length; i++) {
|
||||
const item = pageTexts[i];
|
||||
if (item.str === '' && pageTexts[i - 1]) {
|
||||
pageTexts[i - 1].hasEOL = item.hasEOL;
|
||||
pageTexts.splice(i, 1);
|
||||
i--;
|
||||
}
|
||||
}
|
||||
|
||||
page.cleanup();
|
||||
|
||||
return pageTexts
|
||||
.map((token) => {
|
||||
const paragraphEnd = token.hasEOL && /([。?!.?!\n\r]|(\r\n))$/.test(token.str);
|
||||
|
||||
return paragraphEnd ? `${token.str}\n` : token.str;
|
||||
})
|
||||
.join('');
|
||||
};
|
||||
|
||||
const loadingTask = pdfjs.getDocument(buffer.buffer);
|
||||
const doc = await loadingTask.promise;
|
||||
|
||||
const pageTextPromises = [];
|
||||
for (let pageNo = 1; pageNo <= doc.numPages; pageNo++) {
|
||||
pageTextPromises.push(readPDFPage(doc, pageNo));
|
||||
}
|
||||
const pageTexts = await Promise.all(pageTextPromises);
|
||||
|
||||
loadingTask.destroy();
|
||||
|
||||
return {
|
||||
rawText: pageTexts.join('')
|
||||
};
|
||||
};
|
||||
18
packages/service/worker/file/extension/pptx.ts
Normal file
18
packages/service/worker/file/extension/pptx.ts
Normal file
@@ -0,0 +1,18 @@
|
||||
import { ReadRawTextByBuffer, ReadFileResponse } from '../type';
|
||||
// import { parseOfficeAsync } from 'officeparser';
|
||||
import { parseOffice } from '../parseOffice';
|
||||
|
||||
export const readPptxRawText = async ({
|
||||
buffer,
|
||||
encoding
|
||||
}: ReadRawTextByBuffer): Promise<ReadFileResponse> => {
|
||||
const result = await parseOffice({
|
||||
buffer,
|
||||
encoding: encoding as BufferEncoding,
|
||||
extension: 'pptx'
|
||||
});
|
||||
|
||||
return {
|
||||
rawText: result
|
||||
};
|
||||
};
|
||||
28
packages/service/worker/file/extension/rawText.ts
Normal file
28
packages/service/worker/file/extension/rawText.ts
Normal file
@@ -0,0 +1,28 @@
|
||||
import iconv from 'iconv-lite';
|
||||
import { ReadRawTextByBuffer, ReadFileResponse } from '../type';
|
||||
|
||||
const rawEncodingList = [
|
||||
'ascii',
|
||||
'utf8',
|
||||
'utf-8',
|
||||
'utf16le',
|
||||
'utf-16le',
|
||||
'ucs2',
|
||||
'ucs-2',
|
||||
'base64',
|
||||
'base64url',
|
||||
'latin1',
|
||||
'binary',
|
||||
'hex'
|
||||
];
|
||||
|
||||
// 加载源文件内容
|
||||
export const readFileRawText = ({ buffer, encoding }: ReadRawTextByBuffer): ReadFileResponse => {
|
||||
const content = rawEncodingList.includes(encoding)
|
||||
? buffer.toString(encoding as BufferEncoding)
|
||||
: iconv.decode(buffer, 'gbk');
|
||||
|
||||
return {
|
||||
rawText: content
|
||||
};
|
||||
};
|
||||
45
packages/service/worker/file/extension/xlsx.ts
Normal file
45
packages/service/worker/file/extension/xlsx.ts
Normal file
@@ -0,0 +1,45 @@
|
||||
import { ReadRawTextByBuffer, ReadFileResponse } from '../type';
|
||||
import xlsx from 'node-xlsx';
|
||||
import Papa from 'papaparse';
|
||||
|
||||
export const readXlsxRawText = async ({
|
||||
buffer
|
||||
}: ReadRawTextByBuffer): Promise<ReadFileResponse> => {
|
||||
const result = xlsx.parse(buffer, {
|
||||
skipHidden: false,
|
||||
defval: ''
|
||||
});
|
||||
|
||||
const format2Csv = result.map(({ name, data }) => {
|
||||
return {
|
||||
title: `#${name}`,
|
||||
csvText: data.map((item) => item.join(',')).join('\n')
|
||||
};
|
||||
});
|
||||
|
||||
const rawText = format2Csv.map((item) => item.csvText).join('\n');
|
||||
const formatText = format2Csv
|
||||
.map((item) => {
|
||||
const csvArr = Papa.parse(item.csvText).data as string[][];
|
||||
const header = csvArr[0];
|
||||
|
||||
const formatText = header
|
||||
? csvArr
|
||||
.map((item) =>
|
||||
item
|
||||
.map((item, i) => (item ? `${header[i]}:${item}` : ''))
|
||||
.filter(Boolean)
|
||||
.join('\n')
|
||||
)
|
||||
.join('\n')
|
||||
: '';
|
||||
|
||||
return `${item.title}\n${formatText}`;
|
||||
})
|
||||
.join('\n');
|
||||
|
||||
return {
|
||||
rawText: rawText,
|
||||
formatText
|
||||
};
|
||||
};
|
||||
Reference in New Issue
Block a user