What is an AudioContext fingerprint
The technique uses OfflineAudioContext from the browser's Web Audio API: a signal from an oscillator is rendered, run through a dynamics compressor, and the resulting samples are hashed to produce a fingerprint. The differences come from how each browser version and engine implements the API and from the large amount of floating-point math in the audio processing; the way the oscillator signal is generated also varies across CPU architectures and operating systems. Within a fixed environment, the maturity of the Web Audio API makes the audio fingerprint very stable.
How to obtain it
let pxi_output, pxi_oscillator, pxi_compressor;
// 1 channel, 44100 samples at a 44100 Hz sample rate => 1 second of rendered audio
let context = new (window.OfflineAudioContext || window.webkitOfflineAudioContext)(1, 44100, 44100);
if (!context) {
    pxi_output = 0;
}
pxi_oscillator = context.createOscillator();
pxi_oscillator.type = "triangle";
pxi_oscillator.frequency.value = 1e4;
pxi_compressor = context.createDynamicsCompressor();
pxi_compressor.threshold && (pxi_compressor.threshold.value = -50);
pxi_compressor.knee && (pxi_compressor.knee.value = 40);
pxi_compressor.ratio && (pxi_compressor.ratio.value = 12);
// reduction is a read-only float in the current spec, so this guard simply skips it there
pxi_compressor.reduction && (pxi_compressor.reduction.value = -20);
pxi_compressor.attack && (pxi_compressor.attack.value = 0);
pxi_compressor.release && (pxi_compressor.release.value = .25);
pxi_oscillator.connect(pxi_compressor);
pxi_compressor.connect(context.destination);
pxi_oscillator.start(0);
context.startRendering();
context.oncomplete = function (evnt) {
    pxi_output = 0;
    let Buffer_ret = '';
    let len = evnt.renderedBuffer.length;
    let renderedBuffer = evnt.renderedBuffer.getChannelData(0);
    // concatenate every rendered sample and hash the whole buffer
    for (let i = 0; i < len; i++) {
        Buffer_ret += renderedBuffer[i].toString();
    }
    let pxi_full_buffer_hash = hash(Buffer_ret);
    console.log('pxi_full_buffer_hash:' + pxi_full_buffer_hash);
    // the sum of samples 4500..4999 is used as a compact fingerprint value
    for (let i = 4500; i < 5000; i++) {
        pxi_output += Math.abs(renderedBuffer[i]);
    }
    console.log('pxi_output:' + pxi_output);
    pxi_compressor.disconnect();
};
let I64BIT_TABLE =
'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_-'.split('');
// DJB2-style string hash, output encoded with the 64-character table above
function hash(input) {
    var hash = 5381;
    var i = input.length - 1;
    if (typeof input == 'string') {
        for (; i > -1; i--)
            hash += (hash << 5) + input.charCodeAt(i);
    } else {
        for (; i > -1; i--)
            hash += (hash << 5) + input[i];
    }
    var value = hash & 0x7FFFFFFF;
    var retValue = '';
    do {
        retValue += I64BIT_TABLE[value & 0x3F];
    } while (value >>= 6);
    return retValue;
}
Most of the code above is adapted from https://audiofingerprint.openwpm.com/.
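For reference, current browsers expose a promise-based startRendering(), and the fingerprintjs article computes its value the same way; below is a minimal sketch of that variant (the function name getAudioFingerprint is illustrative, not from the original code). It returns the same sum over samples 4500..4999 as pxi_output above.

async function getAudioFingerprint() {
    const OfflineCtx = window.OfflineAudioContext || window.webkitOfflineAudioContext;
    const context = new OfflineCtx(1, 44100, 44100);
    const oscillator = context.createOscillator();
    oscillator.type = 'triangle';
    oscillator.frequency.value = 10000;
    const compressor = context.createDynamicsCompressor();
    compressor.threshold.value = -50;
    compressor.knee.value = 40;
    compressor.ratio.value = 12;
    compressor.attack.value = 0;
    compressor.release.value = 0.25;
    oscillator.connect(compressor);
    compressor.connect(context.destination);
    oscillator.start(0);
    // promise-based rendering instead of the oncomplete callback
    const renderedBuffer = await context.startRendering();
    const samples = renderedBuffer.getChannelData(0);
    let sum = 0;
    for (let i = 4500; i < 5000; i++) {
        sum += Math.abs(samples[i]);
    }
    return sum;
}
getAudioFingerprint().then(fp => console.log('audio fingerprint:', fp));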
Interference
One approach starts from AudioBuffer.prototype.getChannelData: for example, the audiocontext-fingerprint-defender extension injects noise directly at this interface.
// Note: in the extension, `context` is not the OfflineAudioContext above but an
// object that remembers the last buffer seen; a stand-in is declared here so the
// snippet runs on its own.
const defenderState = { BUFFER: null };
const getChannelData = AudioBuffer.prototype.getChannelData;
Object.defineProperty(AudioBuffer.prototype, "getChannelData", {
    "value": function () {
        const results_1 = getChannelData.apply(this, arguments);
        if (defenderState.BUFFER !== results_1) {
            defenderState.BUFFER = results_1;
            window.top.postMessage("audiocontext-fingerprint-defender-alert", '*');
            // for every 100 samples, nudge one randomly chosen earlier sample by a tiny amount
            for (var i = 0; i < results_1.length; i += 100) {
                let index = Math.floor(Math.random() * i);
                results_1[index] = results_1[index] + Math.random() * 0.0000001;
            }
        }
        return results_1;
    }
});
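A quick way to see the hook's effect, reusing the getAudioFingerprint() sketch above (this check is illustrative, not part of the extension): offline rendering is deterministic, so without the hook two runs give identical sums, while with the hook installed the injected noise makes them drift.

Promise.all([getAudioFingerprint(), getAudioFingerprint()]).then(([a, b]) => {
    console.log('fingerprints equal:', a === b); // true without the hook, almost always false with it
});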
Other fingerprints
audiofingerprint.openwpm.com also includes fingerprints built by recording the oscillator, again combined with the dynamics compressor. In Chrome, though, this process is blocked and media playback has to be triggered manually by a user gesture. I have only skimmed it and not studied it further; it looks like much the same idea.
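The user-gesture requirement comes from Chrome's autoplay policy: a live AudioContext typically starts out "suspended" and has to be resumed from an input handler before anything can be played or recorded. A minimal sketch (the variable name liveCtx is illustrative):

const liveCtx = new (window.AudioContext || window.webkitAudioContext)();
console.log(liveCtx.state); // typically "suspended" until a user gesture
document.addEventListener('click', () => {
    liveCtx.resume().then(() => {
        console.log('AudioContext state:', liveCtx.state); // "running" after the gesture
        // the oscillator/compressor graph and any recording could start here
    });
}, { once: true });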
Stable, but not very distinctive
For audio fingerprinting, we found that the signal contributes only slightly to uniqueness but is highly stable, resulting in a small net increase to fingerprint accuracy.
References
https://fingerprintjs.com/blog/audio-fingerprinting/
https://audiofingerprint.openwpm.com/