This article looks at a Chrome memory problem with the HTML5 File API + AngularJS when uploading large files, and a solution that may help if you hit the same issue.

Problem Description


I have a web app that needs to upload large files to Azure BLOB storage. My solution uses the HTML5 File API to slice the file into chunks, which are then put as blob blocks; the block IDs are stored in an array, and the blocks are then committed as a blob.

The solution works fine in IE. On 64-bit Chrome I have successfully uploaded 4 GB files but see very heavy memory usage (2 GB+). On 32-bit Chrome the specific Chrome process will get to around 500-550 MB and then crash.

I can't see any obvious memory leaks or things I can change to help garbage collection. I store the block IDs in an array, so obviously there will be some memory creep, but this shouldn't be massive. It's almost as if the File API is holding the whole file it slices in memory.

It's written as an Angular service called from a controller; I think only the service code is pertinent:

(function() {
    'use strict';

    angular
    .module('app.core')
    .factory('blobUploadService',
    [
        '$http', 'stringUtilities',
        blobUploadService
    ]);

function blobUploadService($http, stringUtilities) {

    var defaultBlockSize = 1024 * 1024; // Default to 1024KB
    var stopWatch = {};
    var state = {};

    var initializeState = function(config) {
        var blockSize = defaultBlockSize;
        if (config.blockSize) blockSize = config.blockSize;

        var maxBlockSize = blockSize;
        var numberOfBlocks = 1;

        var file = config.file;

        var fileSize = file.size;
        if (fileSize < blockSize) {
            maxBlockSize = fileSize;
        }

        if (fileSize % maxBlockSize === 0) {
            numberOfBlocks = fileSize / maxBlockSize;
        } else {
            numberOfBlocks = parseInt(fileSize / maxBlockSize, 10) + 1;
        }

        return {
            maxBlockSize: maxBlockSize,
            numberOfBlocks: numberOfBlocks,
            totalBytesRemaining: fileSize,
            currentFilePointer: 0,
            blockIds: new Array(),
            blockIdPrefix: 'block-',
            bytesUploaded: 0,
            submitUri: null,
            file: file,
            baseUrl: config.baseUrl,
            sasToken: config.sasToken,
            fileUrl: config.baseUrl + config.sasToken,
            progress: config.progress,
            complete: config.complete,
            error: config.error,
            cancelled: false
        };
    };

    /* config: {
      baseUrl: // baseUrl for blob file uri (i.e. http://<accountName>.blob.core.windows.net/<container>/<blobname>),
      sasToken: // Shared access signature querystring key/value prefixed with ?,
      file: // File object using the HTML5 File API,
      progress: // progress callback function,
      complete: // complete callback function,
      error: // error callback function,
      blockSize: // Use this to override the defaultBlockSize
    } */
    var upload = function(config) {
        state = initializeState(config);

        var reader = new FileReader();
        reader.onloadend = function(evt) {
            if (evt.target.readyState === FileReader.DONE && !state.cancelled) { // DONE === 2
                var uri = state.fileUrl + '&comp=block&blockid=' + state.blockIds[state.blockIds.length - 1];
                var requestData = new Uint8Array(evt.target.result);

                $http.put(uri,
                        requestData,
                        {
                            headers: {
                                'x-ms-blob-type': 'BlockBlob',
                                'Content-Type': state.file.type
                            },
                            transformRequest: []
                        })
                    .success(function(data, status, headers, config) {
                        state.bytesUploaded += requestData.length;

                        var percentComplete = ((parseFloat(state.bytesUploaded) / parseFloat(state.file.size)) * 100
                        ).toFixed(2);
                        if (state.progress) state.progress(percentComplete, data, status, headers, config);

                        uploadFileInBlocks(reader, state);
                    })
                    .error(function(data, status, headers, config) {
                        if (state.error) state.error(data, status, headers, config);
                    });
            }
        };

        uploadFileInBlocks(reader, state);

        return {
            cancel: function() {
                state.cancelled = true;
            }
        };
    };

    function cancel() {
        stopWatch = {};
        state.cancelled = true;
        return true;
    }

    function startStopWatch(handle) {
        if (stopWatch[handle] === undefined) {
            stopWatch[handle] = {};
            stopWatch[handle].start = Date.now();
        }
    }

    function stopStopWatch(handle) {
        stopWatch[handle].stop = Date.now();
        var duration = stopWatch[handle].stop - stopWatch[handle].start;
        delete stopWatch[handle];
        return duration;
    }

    var commitBlockList = function(state) {
        var uri = state.fileUrl + '&comp=blocklist';

        var requestBody = '<?xml version="1.0" encoding="utf-8"?><BlockList>';
        for (var i = 0; i < state.blockIds.length; i++) {
            requestBody += '<Latest>' + state.blockIds[i] + '</Latest>';
        }
        requestBody += '</BlockList>';

        $http.put(uri,
                requestBody,
                {
                    headers: {
                        'x-ms-blob-content-type': state.file.type
                    }
                })
            .success(function(data, status, headers, config) {
                if (state.complete) state.complete(data, status, headers, config);
            })
            .error(function(data, status, headers, config) {
                if (state.error) state.error(data, status, headers, config);
                // called asynchronously if an error occurs
                // or server returns response with an error status.
            });
    };

    var uploadFileInBlocks = function(reader, state) {
        if (!state.cancelled) {
            if (state.totalBytesRemaining > 0) {

                var fileContent = state.file.slice(state.currentFilePointer,
                    state.currentFilePointer + state.maxBlockSize);
                var blockId = state.blockIdPrefix + stringUtilities.pad(state.blockIds.length, 6);

                state.blockIds.push(btoa(blockId));
                reader.readAsArrayBuffer(fileContent);

                state.currentFilePointer += state.maxBlockSize;
                state.totalBytesRemaining -= state.maxBlockSize;
                if (state.totalBytesRemaining < state.maxBlockSize) {
                    state.maxBlockSize = state.totalBytesRemaining;
                }
            } else {
                commitBlockList(state);
            }
        }
    };

    return {
        upload: upload,
        cancel: cancel,
        startStopWatch: startStopWatch,
        stopStopWatch: stopStopWatch
    };
};
})();

Are there any ways I can move the scope of objects to help Chrome's GC? I have seen other people mention similar issues, but I understood Chromium had resolved some of them.

I should say my solution is heavily based on Gaurav Mantri's blog post here:

http://gauravmantri.com/2013/02/16/uploading-large-files-in-windows-azure-blob-storage-using-shared-access-signature-html-and-javascript/#comment-47480

Solution

You are correct. The new Blobs created by .slice() are being held in memory.

The solution is to call Blob.prototype.close() on the Blob reference when processing of the Blob or File object is complete.
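
As a minimal sketch (not the author's exact code) of where such a call could go in the question's uploadFileInBlocks flow — note that Blob.prototype.close() was a File API proposal that never shipped widely, so it is feature-detected here:

    var fileContent = state.file.slice(state.currentFilePointer,
        state.currentFilePointer + state.maxBlockSize);

    reader.onloadend = function(evt) {
        // ... PUT evt.target.result as a block, as in the question ...
        if (typeof fileContent.close === 'function') {
            fileContent.close(); // sketch: release the slice's backing storage
        }
    };
    reader.readAsArrayBuffer(fileContent);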

Note also that the JavaScript at the Question creates a new instance of FileReader each time the upload function is called.

Blob instances exist for the life of the document, though a Blob should be garbage collected once it has been removed from the Blob URL Store.

If the Blob object has been passed to URL.createObjectURL(), call URL.revokeObjectURL() with the object URL, then call .close() on the Blob or File object.

You can view the result of these calls by opening

chrome://blob-internals

and reviewing the details before and after the calls that create and close a Blob.
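
A snippet along these lines can produce the before/after entries shown next (again with Blob.prototype.close() feature-detected, since it was never widely implemented):

    var blob = new Blob(['abc'], { type: 'text/plain' }); // entry appears with Length: 3
    var url = URL.createObjectURL(blob);                  // adds a blob: URL entry
    URL.revokeObjectURL(url);                             // removes the blob: URL entry
    if (typeof blob.close === 'function') {
        blob.close();                                     // Length field is dropped
    }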

For example, from

xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
Refcount: 1
Content Type: text/plain
Type: data
Length: 3

to

xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
Refcount: 1
Content Type: text/plain

following the call to .close(). Similarly, a Blob URL entry such as

blob:http://example.com/c2823f75-de26-46f9-a4e5-95f57b8230bd
Uuid: 29e430a6-f093-40c2-bc70-2b6838a713bc

is removed once URL.revokeObjectURL() is called.


An alternative approach could be to send the file as an ArrayBuffer, or as chunks of array buffers, and then reassemble the file at the server.
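
A minimal sketch of that idea, assuming a hypothetical server endpoint that accepts raw chunk bodies with name and offset metadata and reassembles them server-side (the endpoint and query parameters are illustrative, not part of the original answer):

    function uploadChunks(file, endpoint, chunkSize) {
        chunkSize = chunkSize || 1024 * 1024;
        var offset = 0;

        // read one slice of the file as an ArrayBuffer
        function readSlice(slice) {
            return new Promise(function(resolve, reject) {
                var fr = new FileReader();
                fr.onload = function() { resolve(fr.result); };
                fr.onerror = reject;
                fr.readAsArrayBuffer(slice);
            });
        }

        // upload chunks sequentially until the whole file is sent
        function next() {
            if (offset >= file.size) return Promise.resolve();
            var slice = file.slice(offset, offset + chunkSize);
            var url = endpoint + '?name=' + encodeURIComponent(file.name) +
                '&offset=' + offset;
            offset += chunkSize;
            return readSlice(slice)
                .then(function(buffer) {
                    // send each chunk as an ArrayBuffer
                    return fetch(new Request(url, { method: 'PUT', body: buffer }));
                })
                .then(next);
        }

        return next();
    }

    // e.g. uploadChunks(input.files[0], '/path/to/server/')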

Or you can call the FileReader constructor, FileReader.prototype.readAsArrayBuffer(), and the load event of FileReader once each.

At the load event of FileReader, pass the ArrayBuffer to Uint8Array, then use ReadableStream, TypedArray.prototype.subarray(), .getReader(), and .read() to get N chunks of the ArrayBuffer as TypedArrays at each pull from the Uint8Array. When N chunks equaling the .byteLength of the ArrayBuffer have been processed, pass the array of Uint8Arrays to the Blob constructor to recombine the file parts into a single file at the browser; then send the Blob to the server.

<!DOCTYPE html>
<html>

<head>
</head>

<body>
  <input id="file" type="file">
  <br>
  <progress value="0"></progress>
  <br>
  <output for="file"><img alt="preview"></output>
  <script type="text/javascript">
    const [input, output, img, progress, fr, handleError, CHUNK] = [
      document.querySelector("input[type='file']")
      , document.querySelector("output[for='file']")
      , document.querySelector("output img")
      , document.querySelector("progress")
      , new FileReader
      , (err) => console.log(err)
      , 1024 * 1024
    ];

    progress.addEventListener("progress", e => {
      progress.value = e.detail.value;
      e.detail.promise();
    });

    let [chunks, NEXT, CURR, url, blob] = [Array(), 0, 0];

    input.onchange = () => {
      NEXT = CURR = progress.value = progress.max = chunks.length = 0;
      if (url) {
        URL.revokeObjectURL(url);
        if (blob.hasOwnProperty("close")) {
          blob.close();
        }
      }

      if (input.files.length) {
        console.log(input.files[0]);
        progress.max = input.files[0].size;
        progress.step = progress.max / CHUNK;
        fr.readAsArrayBuffer(input.files[0]);
      }

    }

    fr.onload = () => {
      const VIEW = new Uint8Array(fr.result);
      const LEN = VIEW.byteLength;
      const {type, name:filename} = input.files[0];
      const stream = new ReadableStream({
          pull(controller) {
            if (NEXT < LEN) {
              controller
              .enqueue(VIEW.subarray(NEXT, !NEXT ? CHUNK : CHUNK + NEXT));
               NEXT += CHUNK;
            } else {
              controller.close();
            }
          },
          cancel(reason) {
            console.log(reason);
            throw new Error(reason);
          }
      });

      const [reader, processData] = [
        stream.getReader()
        , ({value, done}) => {
            if (done) {
              return reader.closed.then(() => chunks);
            }
            chunks.push(value);
            return new Promise(resolve => {
              progress.dispatchEvent(
                new CustomEvent("progress", {
                  detail:{
                    value:CURR += value.byteLength,
                    promise:resolve
                  }
                })
              );
            })
            .then(() => reader.read().then(data => processData(data)))
            .catch(e => reader.cancel(e))
        }
      ];

      reader.read()
      .then(data => processData(data))
      .then(data => {
        blob = new Blob(data, {type});
        console.log("complete", data, blob);
        if (/image/.test(type)) {
          url = URL.createObjectURL(blob);
          img.onload = () => {
            img.title = filename;
            input.value = "";
          }
          img.src = url;
        } else {
          input.value = "";
        }
      })
      .catch(e => handleError(e))

    }
  </script>

</body>

</html>

plnkr http://plnkr.co/edit/AEZ7iQce4QaJOKut71jk?p=preview


You can also utilize fetch():

fetch(new Request("/path/to/server/", {method:"PUT", body:blob}))


