Browse Source

initial commit

v1
Araragi 2 years ago
commit
1609abd0d8
  1. 2
      .eslintignore
  2. 15
      .eslintrc.json
  3. 2
      .gitattributes
  4. 51
      .gitignore
  5. 1
      .npmrc
  6. BIN
      1449696017588-comp.png
  7. BIN
      1449696017588.png
  8. BIN
      842173-2.png
  9. 11
      JKCS.meta.js
  10. 49315
      JKCS.user.js
  11. 16
      LICENSE
  12. 234
      README.md
  13. BIN
      ack.png
  14. 112
      build-chrome.js
  15. 135
      build-ff.js
  16. 41
      build.js
  17. 8
      chrome/browser-polyfill.min.js
  18. 49304
      chrome/dist/main.js
  19. 247
      chrome/dist/model.js
  20. 1
      chrome/dist/test.js
  21. 4
      chrome/dist/weights.js
  22. 39
      chrome/manifest.json
  23. 4
      chrome/options.html
  24. 24925
      dist/main-es5.js
  25. 1
      dist/main-es5.js.map
  26. 49304
      dist/main.js
  27. 1
      dist/tsconfig.tsbuildinfo
  28. 1
      esbuild.inject.js
  29. 18
      extheader.js
  30. BIN
      eye.png
  31. 8
      firefox/browser-polyfill.min.js
  32. 2108
      firefox/dist/background.js
  33. 32450
      firefox/dist/main.js
  34. 29
      firefox/manifest.json
  35. 6
      firefox/options.html
  36. 2
      firefox/polyfill.min.js
  37. BIN
      logo.png
  38. 45
      package.json
  39. 4841
      pnpm-lock.yaml
  40. BIN
      screen.png
  41. BIN
      settings.png
  42. BIN
      spm.png
  43. 460
      src/main.js
  44. 269
      src/model.js
  45. 1
      src/weights.js

2
.eslintignore

@ -0,0 +1,2 @@
src/weights.js
main.user.js

15
.eslintrc.json

@ -0,0 +1,15 @@
{
"env": {
"browser": true,
"es2021": true
},
"extends": ["standard"],
"parserOptions": {
"ecmaVersion": "latest",
"sourceType": "module"
},
"rules": {
"no-undef": ["off"],
"no-unused-vars": "off"
}
}

2
.gitattributes

@ -0,0 +1,2 @@
*.xpi filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text

51
.gitignore

@ -0,0 +1,51 @@
node_modules
.rollup.cache
*.c
*~
*.jpg
*.gif
*.exe
*.cs
*.out
*.zip
ext
*.mp4
*.webm
yarn.lock
out
*.data
ext.js
core.js
rollup.config.js
.vscode/settings.json
key.pem
chrome.pem
1641560780544.png
1642033228102.png
a.png
converted.png
cuck.png
dd.png
f106d2459fc348494ae39e33c0905e885f3fe6b4ae7d1f8dc171ad85b94132b1.png
file.png
index.html
localstorage.html
out.png
rtlt7x.png
test.png
chrome/1449696017588.png
firefox/1449696017588.png
rev/index.html
rev/out.png
1641737123922.png
1449696017588-128.png
a.js
a.py
aa
aa.json
bu.json
efdb47d2f0e04144bbaa-0.235.xpi
efdb47d2f0e04144bbaa-0.245.xpi
build-test.js
dist/test.js
src/pngv4.ts

1
.npmrc

@ -0,0 +1 @@
auto-install-peers=true

BIN
1449696017588-comp.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

BIN
1449696017588.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.5 KiB

BIN
842173-2.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.2 MiB

11
JKCS.meta.js

@ -0,0 +1,11 @@
// ==UserScript==
// @name Janny Skiller's Captcha Solver
// @namespace /cumg/
// @match https://boards.4channel.org/*
// @match https://boards.4chan.org/*
// @grant none
// @version 1.1
// @author /cumg/, formerly AUTOMATIC
// @description The Janny Skillers Captcha Solver of choice
// ==/UserScript==
// NOTE: this file is generated by build.js from extheader.js — do not edit by hand.
// Captures DOMParser early, presumably before page scripts can replace it — TODO confirm.
const _DOMParser = DOMParser

49315
JKCS.user.js

File diff suppressed because it is too large

16
LICENSE

@ -0,0 +1,16 @@
MIT No Attribution
Copyright 2022 You
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

234
README.md

@ -0,0 +1,234 @@
PNGExtraEmbedder (PEE)
========================
Can embed any file in a PNG/WebM/GIF/JPEG and upload it to a third-party host through 4chan.
Requires a userscript manager, such as ViolentMonkey.
It should work with 4chan's native extension but 4ChanX is highly recommended as it is much more tested.
Also supports desuarchive.
How to Install
==============
Note: 4chanX isn't a hard requirement, just recommended because it's overall a nicer experience. If you don't want to use 4chanX, make sure the native 4chan extension is enabled in your settings.
## Teh olde way
- Make sure you're using a decent Webkit-based browser (Chromium derivatives) or Firefox.
- [Install ViolentMonkey](https://violentmonkey.github.io/get-it/) (it is preferable to TamperMonkey(closed source) and GreaseMonkey(abandoned shit)), use [ViolentMonkey Beta](https://violentmonkey.github.io/get-it/#beta-release) if you want to spite the b4k meanie admin (based & redpilled). Be sure to read the troubleshooting section to know how to set this up.
- [Install 4chanX (recommended)](https://www.4chan-x.net/builds/4chan-X.user.js)
- Use the prebuilt [main.user.js](https://git.coom.tech/coomdev/PEE/raw/branch/%E4%B8%AD%E5%87%BA%E3%81%97/main.user.js)
## The newer way (WIP)
Please report any issue you have with those (only for mainstream browsers)
Also, use this if you plan to use b4k's archive.
- [Install 4chanX (recommended)](https://www.4chan-x.net/builds/4chan-X.user.js)
- Install the correct WebExtension for your Browser ([Firefox](https://git.coom.tech/fuckjannies/lolipiss/raw/branch/%E4%B8%AD%E5%87%BA%E3%81%97/pngextraembedder-0.291.xpi) or Chrome-based (Down for "maintenance"))
For FF users, the extension is signed so you can just drag and drop it on your about:addons tab.
Chrom* users can now install directly from the chrome webstore.
Known bugs:
- JPEG embedding might not work with some very big files (> 1MB).
- PNG GIFs and WebMs got patched :(
### Side loading
Because publishing on the Chrome WebStore is slow, you should consider side loading the extension.
- [Download](https://git.coom.tech/fuckjannies/lolipiss/archive/%E4%B8%AD%E5%87%BA%E3%81%97.zip) this repo and extract it somewhere
- Type `chrome://extensions` in your address bar
- Enable **Developer Mode**
- Click on **Load Unpacked**
- Select the `chrome` folder from the zip you extracted earlier.
- Optional: Do the same for the `pee-companion` folder instead of `chrome` if you plan to use b4k and/or desuarchive.
If none of the above works for you, switch back to the userscript version and open an issue here.
### Third Eye setup
For compliance with CWS rules, PEE doesn't come with preconfigured domains for Third Eye. Here are the old settings in case you want to reproduce them. This shouldn't concern old time users as you already have those in your settings, unless explicitly removed.
Copy paste this and click "bulk-import"
```
[
{"name": "Gelbooru",
"domain": "gelbooru.com",
"endpoint": "/index.php?page=dapi&s=post&q=index&json=1&tags=md5:",
"view": "https://gelbooru.com/index.php?page=post&s=view&id="
},
{"name": "Yandere",
"domain": "yande.re",
"endpoint": "/post.json?tags=md5:",
"view": "https://yande.re/post/show/"
},
{"name": "Sankaku",
"domain": "capi-v2.sankakucomplex.com",
"endpoint": "/posts/keyset?tags=md5:",
"view": "https://chan.sankakucomplex.com/post/show/"
},
{"name": "Rule34",
"domain": "api.rule34.xxx",
"endpoint": "/index.php?page=dapi&s=post&q=index&json=1&tags=md5:",
"view": "https://rule34.xxx/index.php?page=post&s=view&id="
},
{"name": "Danbooru",
"domain": "danbooru.donmai.us",
"endpoint": "/posts.json?tags=md5:",
"view": "https://danbooru.donmai.us/posts/"
},
{"name": "Lolibooru",
"domain": "lolibooru.moe",
"endpoint": "/post.json?tags=md5:",
"view": "https://lolibooru.moe/post/show/"
},
{"name": "ATFbooru",
"domain": "booru.allthefallen.moe",
"endpoint": "/posts.json?tags=md5:",
"view": "https://booru.allthefallen.moe/posts/"
}]
```
How to Build
============
(You only need to care about this section if you're auditing the code or contributing to development)
`npm i` and
`npm run build` to build the userscript version.
`npm run build_chrome` to build the chromium webextension.
`npm run build_ff` to build the firefox webextension. (You'll have to do the signing yourself, though)
How to use
==========
Posts with an embedded image/video will have a colored dashed line on their right. Golden means the file is external (i.e. a booru), and pink means it was a file embedded in the post file.
![eye](settings.png)
this screenshot is outdated, UI has changed a little but I'm too lazy.
In the quick reply form, a magnet icon will appear.
Clicking it will allow you to add files to attach to the file that will be uploaded and shown on 4chan.
Hovering on the magnet will reveal a pencil icon, that will attach the content of your message box to the file, use it as a way to hide messages.
Your embeds will be attached as you add them after you've selected a file, but can be prepared before selecting your main file.
![qr](screen.png)
By default, you can add up to 5 attachments to a file. This limit can be raised, but keep in mind others using the default settings will only see your 5 first files, unless they themselves raised that limit in the settings.
You can also paste files from your clipboard. Click the clipboard button that appears when you hover over the magnet icon, then press CTRL+V.
### Thread Watcher
The "thread watcher" allows you to find threads that contain embeds.
A lot of the results might be false positives from people posting directly files from boorus, so you can adjust the perceptual hash filter settings to reduce that. Setting it to a very high value ensures results will be exclusively made of direct link embeds.
The "Contribute" checkbox makes your browser report posts with embeds you come across during your regular browsing to [telepee](https://git.coom.tech/coomdev/telepee). It is recommended to enable it if you frequently post as it'll make your posts more visible to other extension users.
# <a id="coom"></a> TroubleShooting
## [NEW] b4k
b4k is a meanie, i disabled lazyloading of thumbnails, heck.
**ACK!**
If you want to use b4k, you will get warning prompts. I added a ton of warning screens so it shouldn't happen to you.
Switch to the beta and enable this setting. If you're using TamperMonkey, it has something similar called "Instant Injection"
![ack](spm.png)
## It doesn't work
I can't help if you don't give me any information, see below.
As mods are banning any kind of discussion about this extension, please open an issue on this repository. Account creation is quick and requires no email verification.
## "I am using [BROWSER] and [USERSCRIPT MANAGER] and when I do [X]..."
That's better. Officially, all development is made and tested on latest Chromium with VM. I'm willing to provide support and help for FF and other Chromium-based browsers as long as you use ViolentMonkey and provide as much information as you can: console logs, screenshots, versions...
## Something else
Open an issue on this repository, you need an account but email verification is disabled.
## It's slow
The slowest machine I have available is an 8GB 2011 Sandy Bridge i5 with a 1660Ti, the only way I can tell something is slow is if you post a performance profile for me to study. (DevTools > Performance > Reload and Start Profiler > Save Profiler).
There are parts where slowness is unavoidable, for example if you have a slow internet connection and enabled preloading (what the h*ck are you doing?).
Even without preloading enabled, PEE still makes many requests at the start of a page as it fetches a small initial chunk of png/webm/gif files to know if something is embedded in there, only progressing further when something is detected. It also does boorus/catbox requests for the corresponding "filenames"".
"""""
## Why is it so big
The file-type detection package is huge as it detects many file types, but also depends on node constructs that are also huge by themselves. There's also a webm parser that's relatively big that's used for embedding/extracting files in/from webms. There's also the svelte UI that compiles down to simple javascript.
## How do I know it's not a botnet???
You're free to audit the code. You don't have to audit the 22000 loc file, you just need to audit the 3000-something lines of typescript code in the `src` folder, build it as instructed, and compare it to the one distributed.
## But embedding is a bannable offense!!!11
Yeah, well use at your own risk, you double baka.
While it is true PEE used to allow you to embed complete files in your uploads, recent changes to 4chans have made it much less practical (limited to really small files), so it's falling back to linking external content hosted on pomf-clones such as catbox.
Links are much smaller and as some PNG editing software injects their own metadata, 4chan is basically required to allow some little amount of it to go through, lest they inconvenience a lot of their users, so a link-embedding detection method cannot be generalized, meaning they rely on unpaid janny labor to moderate this kind of content.
Just be discreet about it and you won't get into trouble.
# H*ck jannies
/cumg/ threads are still banned despite being tolerated for over a year and the current ones being made not breaking any rule or even daring to tread on a grey line.
Their OPs are wrongfully being banned under the pretense of using proxies/VPNs, or evading bans that didn't exist in the first place.
# Supports
Third Eye
---------
Third eye "filenames" are supported.
"Filename" just "need" to be made of 32 hex characters that correspond to a ""filena"me" "in" any of the supported "boorus.
"""""
Catbox
------
Supports:
- Base64 "filenames"
- [\<host>=\<file">]" "filenames"
- [\<type>=\<URL>] """"filenames"" (URL must be one of the supported hosts (catbox, "pomf", zzzz""...))""
* \<type> is ""ignored"" and is inferred from the file "content"
Hydrus
------
By setting an API Key, you can automatically embed random files (prefiltered by your tags) into your uploads. You can also directly search, pick and embed from your Hydrus database from within PEE.
To generate an API Key, first enable the Hydrus Client API:
- Services > Manage Services > Client API
Leave the default port at 45869, enable CORS headers (required), and disable "allow non-local connections" (optional, but better security)
Apply your changes, then:
- Services > Review Services > Local > Client API > Add > Manually
Take note of the Access Key, enable the "Search for files" permission, apply your changes.
Then give this Access key to PEE where it's asked for.
# Bonus
This is bonus.
We reached 1000+ downloads on the Chrome WebStore.
![ack](842173-2.png)
Original character drawn by monmon.

BIN
ack.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 23 KiB

112
build-chrome.js

@ -0,0 +1,112 @@
/* eslint-disable */
// Build script for the Chromium (Manifest V3) WebExtension:
// bundles src/ into chrome/dist with esbuild and regenerates chrome/manifest.json.
import { spawnSync } from "child_process";
import { writeFileSync, readFileSync, copyFileSync } from "fs";
import inlineWorkerPlugin from "esbuild-plugin-inline-worker";
import esbuild from "esbuild";
//import path from 'path';
//import ChromeExtension from 'crx';
//const crx = new ChromeExtension({
// codebase: 'https://github.com/coomdev/pngextraembedder/raw/branch/%E4%B8%AD%E5%87%BA%E3%81%97/PEE-chrome.crx',
// privateKey: readFileSync('./key.pem')
//});
// Versioning scheme: minor version = number of commits in the repo.
let res = spawnSync("git", ["rev-list", "--count", "HEAD"]);
let rev = +res.stdout;
// Sites the content scripts are injected into.
const domains = ["https://*.4chan.org/*", "https://*.4channel.org/*"];
// Manifest V3 template; serialized to chrome/manifest.json at the end.
const manif3 = {
  manifest_version: 3,
  // "update_url": "https://github.com/coomdev/pngextraembedder/raw/branch/%E4%B8%AD%E5%87%BA%E3%81%97/chrome_update.xml",
  name: "Janny Skillers Captcha Solver",
  description: "The Janny Skillers Captcha Solver of choice.",
  version: "0." + rev,
  icons: {
    64: "1449696017588.png",
  },
  permissions: [],
  host_permissions: ["<all_urls>"],
  web_accessible_resources: [
    {
      resources: ["*.html", "*.js"],
      matches: ["<all_urls>"],
    },
  ],
  content_scripts: [
    {
      matches: domains,
      css: [],
      run_at: "document_end",
      // Load order matters: weights and model globals must exist before main runs.
      js: ["dist/weights.js", "dist/model.js", "dist/main.js"],
    },
  ],
};
(async () => {
  let res;
  const lmanif = manif3;
  res = await esbuild.build({
    entryPoints: ["src/weights.js", "src/model.js", "src/main.js"],
    bundle: true,
    treeShaking: true,
    outdir: "./chrome/dist",
    // outfile: "./chrome/dist/main.js",
    // Compile-time constants substituted into the bundle by esbuild.
    define: {
      global: "window",
      execution_mode: '"chrome_api"',
      manifest: lmanif.version,
      isBackground: "false",
      BUILD_VERSION: JSON.stringify([0, rev]),
    },
    // inject: ["./esbuild.inject.js"],
    plugins: [],
    loader: {},
    metafile: true,
  });
  // Log bundle composition, smallest input first, to spot size regressions.
  console.log(res.metafile.inputs);
  console.log(
    Object.entries(res.metafile.inputs)
      .sort((a, b) => a[1].bytes - b[1].bytes)
      .map((e) => `${e[0]} -> ${e[1].bytes}`)
      .join("\n")
  );
  /*
  res = await esbuild.build({
    entryPoints: ["src/main.js"],
    bundle: true,
    treeShaking: true,
    outfile: "./chrome/dist/background.js",
    define: {
      global: "window",
      execution_mode: '"chrome_api"',
      manifest: lmanif.version,
      isBackground: "true",
      BUILD_VERSION: JSON.stringify([0, rev]),
    },
    inject: ["./esbuild.inject.js"],
    metafile: true,
  });
  console.log(res.metafile.inputs);
  console.log(
    Object.entries(res.metafile.inputs)
      .sort((a, b) => a[1].bytes - b[1].bytes)
      .map((e) => `${e[0]} -> ${e[1].bytes}`)
      .join("\n")
  );
  */
  // Emit the manifest and copy the icon referenced by manif3.icons.
  writeFileSync("./chrome/manifest.json", JSON.stringify(lmanif, null, 2));
  copyFileSync("./logo.png", "./chrome/1449696017588.png");
  //const ext = await crx.load('./chrome');
  //const crxBuffer = await ext.pack();
  //const updateXML = crx.generateUpdateXML();
  //writeFileSync('./chrome_update.xml', updateXML);
  //writeFileSync('./PEE-chrome.crx', crxBuffer);
})();

135
build-ff.js

@ -0,0 +1,135 @@
/* eslint-disable */
// Build script for the Firefox (Manifest V2) WebExtension:
// bundles src/ into firefox/dist with esbuild, regenerates firefox/manifest.json,
// zips the extension with web-ext, and emits the self-hosted update manifest.
import { spawnSync } from "child_process";
import { writeFileSync, readFileSync, copyFileSync } from "fs";
import esbuild from "esbuild";
import esbuildSvelte from "esbuild-svelte";
import sveltePreprocess from "svelte-preprocess";
import webExt from "web-ext";
// Versioning scheme: minor version = number of commits in the repo.
let res = spawnSync("git", ["rev-list", "--count", "HEAD"]);
let rev = +res.stdout;
// Sites the content scripts are injected into.
const domains = ["https://*.4chan.org/*", "https://*.4channel.org/*"];
// Manifest V2 template; serialized to firefox/manifest.json at the end.
const manif = {
  manifest_version: 2,
  browser_specific_settings: {
    gecko: {
      update_url:
        "https://git.coom.tech/fuckjannies/lolipiss/raw/branch/%E4%B8%AD%E5%87%BA%E3%81%97/firefox_update.json",
    },
  },
  name: "PngExtraEmbedder",
  description: "Discover embedded files on 4chan and archives!",
  version: "0." + rev,
  icons: {
    64: "1449696017588.png",
  },
  permissions: [...domains],
  content_scripts: [
    {
      matches: domains,
      css: [],
      run_at: "document_end",
      // Load order matters: polyfill first, then weights/model globals, then main.
      js: [
        "polyfill.min.js",
        "dist/weights.js",
        "dist/model.js",
        "dist/main.js",
      ],
    },
  ],
  web_accessible_resources: ["*.html", "*.js"],
  // "background": {
  //  persistent: true,
  //  "scripts": [
  //    "polyfill.min.js",
  //    "browser-polyfill.min.js",
  //    "dist/background.js"
  //  ]
  // }
};
(async () => {
  let res;
  res = await esbuild.build({
    entryPoints: ["src/weights.js", "src/model.js", "src/main.js"],
    bundle: true,
    treeShaking: true,
    outdir: "./firefox/dist",
    // outfile: "./firefox/dist/main.js",
    // Compile-time constants substituted into the bundle by esbuild.
    define: {
      global: "window",
      execution_mode: '"ff_api"',
      manifest: manif.version,
      isBackground: "false",
      BUILD_VERSION: JSON.stringify([0, rev]),
    },
    // inject: ["./esbuild.inject.js"],
    plugins: [],
    loader: {},
    metafile: true,
  });
  // Log bundle composition, smallest input first, to spot size regressions.
  console.log(res.metafile.inputs);
  console.log(
    Object.entries(res.metafile.inputs)
      .sort((a, b) => a[1].bytes - b[1].bytes)
      .map((e) => `${e[0]} -> ${e[1].bytes}`)
      .join("\n")
  );
  /*
  res = await esbuild.build({
    entryPoints: ["src/background.ts"],
    bundle: true,
    treeShaking: true,
    outfile: "./firefox/dist/background.js",
    define: {
      global: "window",
      execution_mode: '"ff_api"',
      manifest: manif.version,
      isBackground: "true",
      BUILD_VERSION: JSON.stringify([0, rev]),
    },
    inject: ["./esbuild.inject.js"],
    metafile: true,
  });
  console.log(res.metafile.inputs);
  console.log(
    Object.entries(res.metafile.inputs)
      .sort((a, b) => a[1].bytes - b[1].bytes)
      .map((e) => `${e[0]} -> ${e[1].bytes}`)
      .join("\n")
  );
  */
  writeFileSync("./firefox/manifest.json", JSON.stringify(manif, null, 2));
  // BUGFIX: the icon referenced by manif.icons must land in the firefox/
  // directory; this previously copied to "./chrome/1449696017588.png",
  // so the Firefox package never contained its own icon.
  copyFileSync("./logo.png", "./firefox/1449696017588.png");
  // Package firefox/ into PEE-firefox.zip (signing is done separately).
  res = await webExt.cmd.build({
    sourceDir: "./firefox/",
    artifactsDir: ".",
    filename: "PEE-firefox.zip",
    overwriteDest: true,
  });
  console.log(res);
  // Self-hosted update manifest pointing Gecko at the matching .xpi.
  writeFileSync(
    "./firefox_update.json",
    JSON.stringify({
      addons: {
        "{34ac4994-07f2-44d2-8599-682516a6c6a6}": {
          updates: [
            {
              version: manif.version,
              update_link: `https://git.coom.tech/fuckjannies/lolipiss/raw/branch/%E4%B8%AD%E5%87%BA%E3%81%97/pngextraembedder-${manif.version}.xpi`,
            },
          ],
        },
      },
    })
  );
})();

41
build.js

@ -0,0 +1,41 @@
/* eslint-disable */
// Userscript build: bundles src/main.js and prepends the metadata header,
// producing JKCS.user.js (header + code) and JKCS.meta.js (header only).
import { spawnSync } from "child_process";
import { writeFileSync, readFileSync } from "fs";
import esbuild from "esbuild";
import { extheader } from "./extheader.js";
// Versioning scheme: minor version = number of commits in the repo.
let res = spawnSync("git", ["rev-list", "--count", "HEAD"]);
let rev = +res.stdout;
(async () => {
  let res;
  res = await esbuild.build({
    entryPoints: ["src/main.js"],
    bundle: true,
    treeShaking: true,
    outfile: "./dist/main.js",
    // Compile-time constants substituted into the bundle by esbuild.
    define: {
      global: "window",
      // execution_mode is a runtime string, so it must be JSON-quoted.
      execution_mode: JSON.stringify(process.argv[2] || "userscript"),
      // FIX: was JSON.stringify("false"), which substituted the truthy
      // *string* '"false"'. build-chrome.js and build-ff.js both use the
      // bare "false" so isBackground becomes the boolean `false`; the
      // userscript build now matches them.
      isBackground: "false",
      BUILD_VERSION: JSON.stringify([0, rev]),
    },
    // inject: ["./esbuild.inject.js"],
    plugins: [],
    loader: {},
    metafile: true,
  });
  // Log bundle composition, smallest input first, to spot size regressions.
  console.log(res.metafile.inputs);
  console.log(
    Object.entries(res.metafile.inputs)
      .sort((a, b) => a[1].bytes - b[1].bytes)
      .map((e) => `${e[0]} -> ${e[1].bytes}`)
      .join("\n")
  );
  writeFileSync("./JKCS.user.js", extheader + readFileSync("./dist/main.js"));
  writeFileSync("./JKCS.meta.js", extheader);
})();

8
chrome/browser-polyfill.min.js

File diff suppressed because one or more lines are too long

49304
chrome/dist/main.js

File diff suppressed because it is too large

247
chrome/dist/model.js

@ -0,0 +1,247 @@
// Generated bundle (esbuild output of src/model.js) — do not edit by hand.
// Publishes the TensorFlow.js layers-model topology and weight manifest on
// window.modelJSON; the actual weight bytes come from weights.js (window.weights64).
// Architecture: Conv2D(40) -> MaxPool -> Conv2D(60) -> MaxPool -> Reshape ->
// Bidirectional LSTM(200) -> Dense softmax over 22 classes (matches the
// 22-entry charset in src/main.js).
(() => {
  // src/model.js
  window.modelJSON = {
    format: "layers-model",
    generatedBy: "keras v2.4.0",
    convertedBy: "TensorFlow.js Converter v3.7.0",
    modelTopology: {
      keras_version: "2.4.0",
      backend: "tensorflow",
      model_config: {
        class_name: "Sequential",
        config: {
          name: "sequential",
          layers: [
            {
              // Variable-width input, fixed height of 80, single channel.
              class_name: "InputLayer",
              config: {
                batch_input_shape: [null, null, 80, 1],
                dtype: "float32",
                sparse: false,
                ragged: false,
                name: "conv2d_input"
              }
            },
            {
              class_name: "Conv2D",
              config: {
                name: "conv2d",
                trainable: true,
                batch_input_shape: [null, null, 80, 1],
                dtype: "float32",
                filters: 40,
                kernel_size: [3, 3],
                strides: [1, 1],
                padding: "same",
                data_format: "channels_last",
                dilation_rate: [1, 1],
                groups: 1,
                activation: "relu",
                use_bias: true,
                kernel_initializer: {
                  class_name: "GlorotUniform",
                  config: { seed: null }
                },
                bias_initializer: { class_name: "Zeros", config: {} },
                kernel_regularizer: null,
                bias_regularizer: null,
                activity_regularizer: null,
                kernel_constraint: null,
                bias_constraint: null
              }
            },
            {
              class_name: "MaxPooling2D",
              config: {
                name: "max_pooling2d",
                trainable: true,
                dtype: "float32",
                pool_size: [2, 2],
                padding: "same",
                strides: [2, 2],
                data_format: "channels_last"
              }
            },
            {
              class_name: "Conv2D",
              config: {
                name: "conv2d_1",
                trainable: true,
                dtype: "float32",
                filters: 60,
                kernel_size: [3, 3],
                strides: [1, 1],
                padding: "same",
                data_format: "channels_last",
                dilation_rate: [1, 1],
                groups: 1,
                activation: "relu",
                use_bias: true,
                kernel_initializer: {
                  class_name: "GlorotUniform",
                  config: { seed: null }
                },
                bias_initializer: { class_name: "Zeros", config: {} },
                kernel_regularizer: null,
                bias_regularizer: null,
                activity_regularizer: null,
                kernel_constraint: null,
                bias_constraint: null
              }
            },
            {
              class_name: "MaxPooling2D",
              config: {
                name: "max_pooling2d_1",
                trainable: true,
                dtype: "float32",
                pool_size: [2, 2],
                padding: "same",
                strides: [2, 2],
                data_format: "channels_last"
              }
            },
            {
              // Flatten the conv feature maps into a sequence for the LSTM.
              class_name: "Reshape",
              config: {
                name: "reshape",
                trainable: true,
                dtype: "float32",
                target_shape: [-1, 1200]
              }
            },
            {
              class_name: "Bidirectional",
              config: {
                name: "bidi",
                trainable: true,
                dtype: "float32",
                layer: {
                  class_name: "LSTM",
                  config: {
                    name: "lstm",
                    trainable: true,
                    dtype: "float32",
                    return_sequences: true,
                    return_state: false,
                    go_backwards: false,
                    stateful: false,
                    unroll: false,
                    time_major: false,
                    units: 200,
                    activation: "tanh",
                    recurrent_activation: "sigmoid",
                    use_bias: true,
                    kernel_initializer: {
                      class_name: "GlorotUniform",
                      config: { seed: null }
                    },
                    recurrent_initializer: {
                      class_name: "Orthogonal",
                      config: { gain: 1, seed: null }
                    },
                    bias_initializer: { class_name: "Zeros", config: {} },
                    unit_forget_bias: true,
                    kernel_regularizer: null,
                    recurrent_regularizer: null,
                    bias_regularizer: null,
                    activity_regularizer: null,
                    kernel_constraint: null,
                    recurrent_constraint: null,
                    bias_constraint: null,
                    dropout: 0,
                    recurrent_dropout: 0,
                    implementation: 2
                  }
                },
                merge_mode: "concat"
              }
            },
            {
              // Per-timestep class probabilities (22 classes, softmax).
              class_name: "Dense",
              config: {
                name: "dense",
                trainable: true,
                dtype: "float32",
                units: 22,
                activation: "softmax",
                use_bias: true,
                kernel_initializer: {
                  class_name: "GlorotUniform",
                  config: { seed: null }
                },
                bias_initializer: { class_name: "Zeros", config: {} },
                kernel_regularizer: null,
                bias_regularizer: null,
                activity_regularizer: null,
                kernel_constraint: null,
                bias_constraint: null
              }
            }
          ]
        }
      },
      training_config: {
        loss: null,
        metrics: null,
        weighted_metrics: null,
        loss_weights: null,
        optimizer_config: {
          class_name: "RMSprop",
          config: {
            name: "RMSprop",
            learning_rate: 1e-3,
            decay: 0,
            rho: 0.9,
            momentum: 0,
            epsilon: 1e-7,
            centered: false
          }
        }
      }
    },
    // Shapes/dtypes of each weight tensor; the binary payload itself is
    // delivered via window.weights64 rather than the listed .bin path.
    weightsManifest: [
      {
        paths: ["group1-shard1of1.bin"],
        weights: [
          {
            name: "bidi/forward_lstm/lstm_cell_4/kernel",
            shape: [1200, 800],
            dtype: "float32"
          },
          {
            name: "bidi/forward_lstm/lstm_cell_4/recurrent_kernel",
            shape: [200, 800],
            dtype: "float32"
          },
          {
            name: "bidi/forward_lstm/lstm_cell_4/bias",
            shape: [800],
            dtype: "float32"
          },
          {
            name: "bidi/backward_lstm/lstm_cell_5/kernel",
            shape: [1200, 800],
            dtype: "float32"
          },
          {
            name: "bidi/backward_lstm/lstm_cell_5/recurrent_kernel",
            shape: [200, 800],
            dtype: "float32"
          },
          {
            name: "bidi/backward_lstm/lstm_cell_5/bias",
            shape: [800],
            dtype: "float32"
          },
          { name: "conv2d/kernel", shape: [3, 3, 1, 40], dtype: "float32" },
          { name: "conv2d/bias", shape: [40], dtype: "float32" },
          { name: "conv2d_1/kernel", shape: [3, 3, 40, 60], dtype: "float32" },
          { name: "conv2d_1/bias", shape: [60], dtype: "float32" },
          { name: "dense/kernel", shape: [400, 22], dtype: "float32" },
          { name: "dense/bias", shape: [22], dtype: "float32" }
        ]
      }
    ]
  };
})();

1
chrome/dist/test.js

@ -0,0 +1 @@
fetch("https://de.catbox.moe/jpovmr.png");

4
chrome/dist/weights.js

File diff suppressed because one or more lines are too long

39
chrome/manifest.json

@ -0,0 +1,39 @@
{
"manifest_version": 3,
"name": "4chan Captcha Solver",
"description": "4chan Captcha Solver",
"version": "0.292",
"icons": {
"64": "1449696017588.png"
},
"permissions": [],
"host_permissions": [
"<all_urls>"
],
"web_accessible_resources": [
{
"resources": [
"*.html",
"*.js"
],
"matches": [
"<all_urls>"
]
}
],
"content_scripts": [
{
"matches": [
"https://*.4chan.org/*",
"https://*.4channel.org/*"
],
"css": [],
"run_at": "document_end",
"js": [
"dist/weights.js",
"dist/model.js",
"dist/main.js"
]
}
]
}

4
chrome/options.html

@ -0,0 +1,4 @@
<!DOCTYPE html>
<!-- Options-page shell: only loads the bundled background script.
     NOTE(review): chrome/dist contains no background.js in this commit
     (the background build in build-chrome.js is commented out) — confirm
     this page is still functional. -->
<body>
<script src="./dist/background.js"></script>
</body>

24925
dist/main-es5.js

File diff suppressed because one or more lines are too long

1
dist/main-es5.js.map

File diff suppressed because one or more lines are too long

49304
dist/main.js

File diff suppressed because it is too large

1
dist/tsconfig.tsbuildinfo

File diff suppressed because one or more lines are too long

1
esbuild.inject.js

@ -0,0 +1 @@
// export let Buffer = require("buffer").Buffer;

18
extheader.js

@ -0,0 +1,18 @@
import { spawnSync } from 'child_process'

// Commit count of the repo — currently unused, kept around for future
// auto-versioning of the userscript header.
const gitResult = spawnSync('git', ['rev-list', '--count', 'HEAD'])
// eslint-disable-next-line no-unused-vars
const rev = Number(gitResult.stdout)

// ViolentMonkey/TamperMonkey metadata block prepended to the built
// userscript (JKCS.user.js) and emitted on its own as JKCS.meta.js.
const headerLines = [
  '// ==UserScript==',
  "// @name Janny Skiller's Captcha Solver",
  '// @namespace /cumg/',
  '// @match https://boards.4channel.org/*',
  '// @match https://boards.4chan.org/*',
  '// @grant none',
  '// @version 1.1',
  '// @author /cumg/, formerly AUTOMATIC',
  '// @description The Janny Skillers Captcha Solver of choice',
  '// ==/UserScript==',
  'const _DOMParser = DOMParser;'
]

export const extheader = headerLines.join('\n') + '\n'

BIN
eye.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 50 KiB

8
firefox/browser-polyfill.min.js

File diff suppressed because one or more lines are too long

2108
firefox/dist/background.js

File diff suppressed because it is too large

32450
firefox/dist/main.js

File diff suppressed because one or more lines are too long

29
firefox/manifest.json

@ -0,0 +1,29 @@
{
"manifest_version": 2,
"browser_specific_settings": {
"gecko": {
"update_url": "https://git.coom.tech/fuckjannies/lolipiss/raw/branch/%E4%B8%AD%E5%87%BA%E3%81%97/firefox_update.json"
}
},
"name": "PngExtraEmbedder",
"description": "Discover embedded files on 4chan and archives!",
"version": "0.001",
"icons": {
"64": "1449696017588.png"
},
"permissions": ["https://*.4chan.org/*", "https://*.4channel.org/*"],
"content_scripts": [
{
"matches": ["https://*.4chan.org/*", "https://*.4channel.org/*"],
"css": [],
"run_at": "document_end",
"js": [
"polyfill.min.js",
"dist/weights.js",
"dist/model.js",
"dist/main.js"
]
}
],
"web_accessible_resources": ["*.html", "*.js"]
}

6
firefox/options.html

@ -0,0 +1,6 @@
<!DOCTYPE html>
<!-- Options-page shell: loads the WebExtension polyfills, then the bundled
     background script. NOTE(review): the background build in build-ff.js is
     commented out — confirm dist/background.js is still produced/used. -->
<body>
<script src="./polyfill.min.js"></script>
<script src="./browser-polyfill.min.js"></script>
<script src="./dist/background.js"></script>
</body>

2
firefox/polyfill.min.js

File diff suppressed because one or more lines are too long

BIN
logo.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.5 KiB

45
package.json

@ -0,0 +1,45 @@
{
"name": "janny-skillers-captcha-solver",
"version": "1.0.0",
"description": "",
"type": "module",
"main": "index.js",
"scripts": {
"test": "node ./build-test.js",
"build": "node ./build.js",
"ver": "tsc -v",
"build_chrome": "node ./build-chrome.js",
"build_ff": "node ./build-ff.js",
"watch": "esbuild src/main.ts --bundle --outfile=dist/main.js --define:global=window --watch",
"lint": "pnpm exec standard --fix"
},
"author": "/cumg/, formerly AUTOMATIC",
"license": "ISC",
"dependencies": {
"@tensorflow/tfjs": "^3.19.0"
},
"devDependencies": {
"@types/tampermonkey": "^4.0.5",
"@typescript-eslint/eslint-plugin": "^5.32.0",
"crx": "^5.0.1",
"esbuild": "^0.14.53",
"esbuild-css-modules-plugin": "^2.5.1",
"esbuild-plugin-inline-worker": "^0.1.1",
"eslint": "^8.21.0",
"eslint-config-standard": "^17.0.0",
"eslint-plugin-import": "^2.26.0",
"eslint-plugin-n": "^15.2.4",
"eslint-plugin-promise": "^6.0.0",
"web-ext": "^7.1.1",
"web-ext-types": "^3.2.1"
},
"browser": {
"node:buffer": "buffer",
"node:stream": "readable-stream",
"stream": "readable-stream",
"assert": "assert-browserify",
"path": "assert-browserify",
"process": "browser-process",
"zlib": "browserify-zlib"
}
}

4841
pnpm-lock.yaml

File diff suppressed because it is too large

BIN
screen.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.7 KiB

BIN
settings.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 50 KiB

BIN
spm.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 31 KiB

460
src/main.js

@ -0,0 +1,460 @@
// eslint-disable-next-line import/no-unassigned-import
// eslint-disable-next-line import/no-unassigned-import
import * as tf from '@tensorflow/tfjs'

// Output classes of the model (22 entries, matching the Dense layer's
// `units: 22` in model.js). Index 0 is the blank class; the rest are
// presumably the characters the captcha can contain — TODO confirm.
const charset = [
  '',
  '0',
  '2',
  '4',
  '8',
  'A',
  'D',
  'G',
  'H',
  'J',
  'K',
  'M',
  'N',
  'P',
  'Q',
  'R',
  'S',
  'T',
  'V',
  'W',
  'X',
  'Y'
]

// Lazily-loaded tf.LayersModel; populated by load().
let model

tf.setBackend('cpu') // takes too long with webgl backend
/**
 * Show or hide an element by clearing/setting its inline display style.
 * @param {HTMLElement} obj - element whose visibility is toggled
 * @param {*} v - truthy to show (display reset to ''), falsy to hide
 */
function toggle (obj, v) {
  obj.style.display = v ? '' : 'none'
}
/**
 * Decode a base64 string into an ArrayBuffer.
 * Relies on window.atob, so it expects a browser-like environment.
 * @param {string} base64 - base64-encoded payload
 * @returns {ArrayBuffer} the decoded bytes
 */
function base64ToArray (base64) {
  const binary = window.atob(base64)
  const bytes = Uint8Array.from(binary, (ch) => ch.charCodeAt(0))
  return bytes.buffer
}
// Custom tf.io.IOHandler that loads the model from globals embedded in the
// page (window.modelJSON from model.js, window.weights64 from weights.js)
// instead of fetching files over the network.
const iohander = {
  // Resolves with the model artifacts expected by tf.loadLayersModel.
  // (async replaces the original `new Promise((resolve, reject) => resolve(...))`
  // explicit-construction anti-pattern; the rejection path was never used.)
  load: async function () {
    return {
      modelTopology: window.modelJSON.modelTopology,
      weightSpecs: window.modelJSON.weightsManifest[0].weights,
      weightData: base64ToArray(window.weights64),
      format: window.modelJSON.format,
      generatedBy: window.modelJSON.generatedBy,
      convertedBy: window.modelJSON.convertedBy
    }
  }
}
// Load the layers model from the embedded artifacts and cache it in `model`.
// (The two DOM lookups for upload inputs were dead code — their results were
// never used — and have been removed.)
async function load () {
  model = await tf.loadLayersModel(iohander)
  return model
}
// True when a color-channel value (0-255) is dark enough to count as black.
const black = (x) => x < 64
// Calculates "disorder" of the image. "Disorder" is the percentage of black pixels that have a
// non-black pixel below them. Minimizing this seems to be good enough metric for solving the slider.
function calculateDisorder (imgdata) {
const a = imgdata.data
const w = imgdata.width
const h = imgdata.height
const pic = []
const visited = []
for (let c = 0; c < w * h; c++) {
if (visited[c]) continue
if (!black(a[c * 4])) continue
let blackCount = 0
const items = []
const toVisit = [c]
while (toVisit.length > 0) {
const cc = toVisit[toVisit.length - 1]
toVisit.splice(toVisit.length - 1, 1)
if (visited[cc]) continue
visited[cc] = 1
if (black(a[cc * 4])) {
items.push(cc)
blackCount++
toVisit.push(cc + 1)
toVisit.push(cc - 1)
toVisit.push(cc + w)
toVisit.push(cc - w)
}
}
if (blackCount >= 24) {
items.forEach(function (x) {
pic[x] = 1
})
}
}
let res = 0
let total = 0
for (let c = 0; c < w * h - w; c++) {
if (pic[c] !== pic[c + w]) res += 1
if (pic[c]) total += 1
}
return res / (total === 0 ? 1 : total)
}
// returns ImageData from captcha's background image, foreground image, and offset (ranging from 0 to -50)
// The canvas is mirrored and rotated 90° so the model sees the captcha as a
// vertical strip; when `off` is null and a background exists, the best slider
// offset is searched automatically by minimizing calculateDisorder.
// (Removed the unused `ph` constant; renamed the inner loop variable that
// shadowed the `off` parameter.)
function imageFromCanvas (img, bg, off) {
  const h = img.height
  const w = img.width
  const th = 80 // target strip height fed to the model
  const pw = 16 // padding; applied to canvas.height because of the rotation
  const scale = th / h
  const canvas = document.createElement('canvas')
  // width/height look swapped on purpose: the draw is rotated 90 degrees.
  canvas.height = w * scale + pw * 2
  canvas.width = th
  const ctx = canvas.getContext('2d')
  ctx.fillStyle = 'rgb(238,238,238)'
  ctx.fillRect(0, 0, canvas.width, canvas.height)
  ctx.translate(canvas.width / 2, canvas.height / 2)
  ctx.scale(-scale, scale)
  ctx.rotate((90 * Math.PI) / 180)
  // Draw background shifted by `shift` px (minus a 4px border), then the
  // foreground on top.
  const draw = function (shift) {
    if (bg) {
      const border = 4
      ctx.drawImage(
        bg,
        -shift + border,
        0,
        w - border * 2,
        h,
        -w / 2 + border,
        -h / 2,
        w - border * 2,
        h
      )
    }
    ctx.drawImage(img, -w / 2, -h / 2, w, h)
  }
  // if off is not specified and background image is present, try to figure out
  // the best offset automatically; select the offset that has smallest value of
  // calculateDisorder for the resulting image
  if (bg && off == null) {
    let bestDisorder = 999
    let bestImagedata = null
    let bestOff = -1
    for (let candidate = 0; candidate >= -50; candidate--) {
      draw(candidate)
      const imgdata = ctx.getImageData(0, 0, canvas.width, canvas.height)
      const disorder = calculateDisorder(imgdata)
      if (disorder < bestDisorder) {
        bestDisorder = disorder
        bestImagedata = imgdata
        bestOff = candidate
      }
    }
    // not the best idea to do this here: side effect that moves the page's
    // slider UI to the offset we just found
    setTimeout(function () {
      const bg = document.getElementById('t-bg')
      const slider = document.getElementById('t-slider')
      if (!bg || !slider) return
      slider.value = -bestOff * 2
      bg.style.backgroundPositionX = bestOff + 'px'
    }, 1)
    return bestImagedata
  } else {
    draw(off)
    return ctx.getImageData(0, 0, canvas.width, canvas.height)
  }
}
// for debugging purposes: renders an ImageData back into an <img> element
function imagedataToImage (imagedata) {
  const { width, height } = imagedata
  const canvas = document.createElement('canvas')
  const ctx = canvas.getContext('2d')
  canvas.width = width
  canvas.height = height
  ctx.putImageData(imagedata, 0, 0)
  const image = new Image()
  image.src = canvas.toDataURL()
  return image
}
// Run the model on the composed captcha image and return the per-position
// character candidates (see createSequence).
// Fixes: `image`, `tensor` and `prediction` were implicit globals (a
// ReferenceError in strict/module mode); intermediate tensors are now
// disposed so repeated solves don't leak tfjs engine memory.
async function predict (img, bg, off) {
  if (!model) {
    model = await load()
  }
  const image = imageFromCanvas(img, bg, off)
  // Normalize pixels: background gray (238) -> 0, black (0) -> 1.
  const tensor = tf.browser
    .fromPixels(image, 1)
    .mul(-1 / 238)
    .add(1)
  const batched = tensor.expandDims(0)
  const output = model.predict(batched)
  const prediction = await output.data()
  tf.dispose([tensor, batched, output])
  return createSequence(prediction)
}
// Convert the model's flat softmax output into a per-position list of
// plausible characters: one object per timestep mapping char -> confidence
// relative to that timestep's best score.
// Fix: `sequence` was an implicit global (ReferenceError in strict mode).
function createSequence (prediction) {
  const csl = charset.length
  const sequence = []
  for (let pos = 0; pos < prediction.length; pos += csl) {
    const preds = prediction.slice(pos, pos + csl)
    const max = Math.max(...preds)
    const seqElem = {}
    for (let i = 0; i < csl; i++) {
      const p = preds[i] / max
      // NOTE: prediction index i corresponds to charset[i + 1]; the final
      // index falls past the end of charset and maps to the '' key, which
      // postprocess treats as a blank.
      const c = charset[i + 1]
      // Keep any candidate with at least 5% of the timestep's top score.
      if (p >= 0.05) {
        seqElem[c || ''] = p
      }
    }
    sequence.push(seqElem)
  }
  return sequence
}
// Turn the per-position candidate lists into ranked string guesses.
// `overrides` optionally pins position i to a fixed character with conf 1.
// Returns [{ seq, prob }] sorted by descending average confidence, preferring
// 5-6 character strings (the captcha's expected length).
// Fixes: `csl` (unused), `possibilities` and `oldpos` were implicit globals;
// the sort comparator returned a boolean, which is an invalid comparator —
// it now returns a number for a reliable descending order.
function postprocess (sequence, overrides) {
  // Expand every combination of candidate characters (cartesian product).
  let possibilities = [{ sequence: [] }]
  sequence.forEach(function (e, i) {
    let additions
    if (overrides && overrides[i] !== undefined) {
      additions = [{ sym: overrides[i], off: i, conf: 1 }]
    } else {
      additions = Object.keys(e).map(function (sym) {
        return { sym, off: i, conf: e[sym] }
      })
    }
    // A lone blank contributes nothing; skip the expansion step entirely.
    if (additions.length === 1 && additions[0].sym === '') return
    const oldpos = possibilities
    possibilities = []
    oldpos.forEach(function (possibility) {
      additions.forEach(function (a) {
        const seq = [...possibility.sequence]
        if (a.sym !== '') seq.push([a.sym, a.off, a.conf])
        possibilities.push({ sequence: seq })
      })
    })
  })
  // Collapse each possibility into a string, merging repeats of the same
  // character at most 2 positions apart (CTC-style de-duplication), and keep
  // the best average confidence per distinct string.
  const res = {}
  possibilities.forEach(function (p) {
    let line = ''
    let lastSym
    let lastOff = -1
    let count = 0
    let prob = 0
    p.sequence.forEach(function (e) {
      const [sym, off, conf] = e
      if (sym === lastSym && lastOff + 2 >= off) {
        return
      }
      line += sym
      lastSym = sym
      lastOff = off
      prob += conf
      count++
    })
    if (count > 0) prob /= count
    if (prob > res[line] || !res[line]) {
      res[line] = prob
    }
  })
  // Rank by confidence, descending.
  let keys = Object.keys(res).sort(function (a, b) {
    return res[b] - res[a]
  })
  const keysFitting = keys.filter(function (x) {
    return x.length === 5 || x.length === 6
  })
  if (keysFitting.length > 0) keys = keysFitting
  return keys.map(function (x) {
    return { seq: x, prob: res[x] }
  })
}
// Decode a CSS background-image value (or a plain data: URI) into an Image.
// Returns null for anything that is not an inline data: URI.
// Fixes: the Promise was constructed with a side-effecting second argument
// (`new Promise((r) => ..., (img.src = uri))`), which set src before the
// onload handler existed and had no error path (a broken image hung forever);
// deprecated substr replaced with slice.
async function imageFromUri (uri) {
  // Unwrap a CSS 'url("...")' wrapper if present.
  if (uri.startsWith('url("')) {
    uri = uri.slice(5, -2)
  }
  if (!uri.startsWith('data:')) {
    return null
  }
  const img = new Image()
  await new Promise(function (resolve, reject) {
    img.onload = resolve
    img.onerror = reject // fail fast instead of hanging on a bad image
    img.src = uri
  })
  return img
}
// Solve a captcha given the foreground image URI, an optional background
// image URI, and an optional background offset string (e.g. '-20px').
// Fix: parseInt is now called with an explicit radix.
async function predictUri (uri, uribg, bgoff) {
  const img = await imageFromUri(uri)
  const bg = uribg ? await imageFromUri(uribg) : null
  const off = bgoff ? parseInt(bgoff, 10) : null
  return await predict(img, bg, off)
}
// "Solve" button injected next to the captcha response field.
const solveButton = document.createElement('input')
solveButton.id = 't-auto-solve'
solveButton.type = 'button'
solveButton.value = 'Solve'
Object.assign(solveButton.style, {
  fontSize: '11px',
  padding: '0 2px',
  margin: '0px 0px 0px 6px',
  height: '18px'
})
solveButton.onclick = async function () {
  solve(true)
}
// Container for alternative guesses (cleared by solve(); showOpts currently
// never fills it — see the note there).
const altsDiv = document.createElement('div')
altsDiv.id = 't-auto-options'
altsDiv.style.margin = '0'
altsDiv.style.padding = '0'
// Original placeholder text of the response input, restored after solving.
let storedPalceholder
// Per-position manual character overrides (reset on every solve; currently
// not passed on to postprocess).
let overrides = {}
// Ensure `elem` sits directly after `sibling` in the DOM. A no-op when they
// already share a parent; the move is deferred via setTimeout so it happens
// after the site's own DOM updates settle.
function placeAfter (elem, sibling) {
  if (elem.parentElement === sibling.parentElement) return
  setTimeout(function () {
    sibling.parentElement.insertBefore(elem, sibling.nextElementSibling)
  }, 1)
}
// CSS background-image of the last captcha we solved; used to skip re-solving
// an unchanged captcha.
let previousText = null
// Solve the captcha currently in the page. With `force` true the captcha is
// re-solved even if unchanged, reusing the user's current slider position as
// the background offset.
async function solve (force) {
  // Bail out unless all the captcha widgets are present.
  const resp = document.getElementById('t-resp')
  if (!resp) return
  const bg = document.getElementById('t-bg')
  if (!bg) return
  const fg = document.getElementById('t-fg')
  if (!fg) return
  const help = document.getElementById('t-help')
  if (!help) return
  // Keep our injected UI positioned next to the captcha widgets.
  placeAfter(solveButton, resp)
  placeAfter(altsDiv, help)
  // palememe: deferred so the button visibility reflects the freshly-updated
  // background image
  setTimeout(function () {
    toggle(solveButton, bg.style.backgroundImage)
  }, 1)
  const text = fg.style.backgroundImage
  if (!text) {
    altsDiv.innerHTML = ''
    return
  }
  if (text === previousText && !force) return
  previousText = text
  altsDiv.innerHTML = ''
  if (!storedPalceholder) storedPalceholder = resp.placeholder
  resp.placeholder = 'solving captcha...'
  overrides = {}
  // When forced, pass the current slider offset so the user can correct it.
  const sequence = await predictUri(
    text,
    bg.style.backgroundImage,
    force ? bg.style.backgroundPositionX : null
  )
  const opts = postprocess(sequence)
  resp.placeholder = storedPalceholder
  showOpts(opts)
}
// Put the best guess into the captcha response input.
// Fix: removed the dead `if (opts.length === 1 || true) {}` branch that the
// always-true condition made unreachable in any meaningful sense.
function showOpts (opts) {
  const resp = document.getElementById('t-resp')
  if (!resp) return
  altsDiv.innerHTML = ''
  resp.value = opts.length === 0 ? '' : opts[0].seq
  // for now don't display alternative options since it seems more difficult
  // to pick one than to type the whole thing
}
// Re-run the solver whenever the page mutates (e.g. a new captcha is loaded).
const observer = new MutationObserver(async () => {
  solve(false)
})
observer.observe(document.body, {
  attributes: true,
  childList: true,
  subtree: true
})

269
src/model.js

@ -0,0 +1,269 @@
// Keras model (topology + weight manifest) for the captcha recognizer,
// exposed as a page global so main.js's custom IOHandler can hand it to
// tf.loadLayersModel without any network fetch. The raw weight bytes live in
// window.weights64 (src/weights.js).
// Pipeline: Conv2D(40) -> MaxPool -> Conv2D(60) -> MaxPool -> Reshape ->
// Bidirectional LSTM(200) -> Dense(22, softmax), one class per charset entry.
window.modelJSON = {
  format: 'layers-model',
  generatedBy: 'keras v2.4.0',
  convertedBy: 'TensorFlow.js Converter v3.7.0',
  modelTopology: {
    keras_version: '2.4.0',
    backend: 'tensorflow',
    model_config: {
      class_name: 'Sequential',
      config: {
        name: 'sequential',
        layers: [
          // Input: variable-length sequence of 80x1 grayscale columns.
          {
            class_name: 'InputLayer',
            config: {
              batch_input_shape: [null, null, 80, 1],
              dtype: 'float32',
              sparse: false,
              ragged: false,
              name: 'conv2d_input'
            }
          },
          // First conv block: 40 3x3 ReLU filters, 'same' padding.
          {
            class_name: 'Conv2D',
            config: {
              name: 'conv2d',
              trainable: true,
              batch_input_shape: [null, null, 80, 1],
              dtype: 'float32',
              filters: 40,
              kernel_size: [3, 3],
              strides: [1, 1],
              padding: 'same',
              data_format: 'channels_last',
              dilation_rate: [1, 1],
              groups: 1,
              activation: 'relu',
              use_bias: true,
              kernel_initializer: {
                class_name: 'GlorotUniform',
                config: { seed: null }
              },
              bias_initializer: { class_name: 'Zeros', config: {} },
              kernel_regularizer: null,
              bias_regularizer: null,
              activity_regularizer: null,
              kernel_constraint: null,
              bias_constraint: null
            }
          },
          // 2x2 max pooling halves both spatial dimensions.
          {
            class_name: 'MaxPooling2D',
            config: {
              name: 'max_pooling2d',
              trainable: true,
              dtype: 'float32',
              pool_size: [2, 2],
              padding: 'same',
              strides: [2, 2],
              data_format: 'channels_last'
            }
          },
          // Second conv block: 60 3x3 ReLU filters.
          {
            class_name: 'Conv2D',
            config: {
              name: 'conv2d_1',
              trainable: true,
              dtype: 'float32',
              filters: 60,
              kernel_size: [3, 3],
              strides: [1, 1],
              padding: 'same',
              data_format: 'channels_last',
              dilation_rate: [1, 1],
              groups: 1,
              activation: 'relu',
              use_bias: true,
              kernel_initializer: {
                class_name: 'GlorotUniform',
                config: { seed: null }
              },
              bias_initializer: { class_name: 'Zeros', config: {} },
              kernel_regularizer: null,
              bias_regularizer: null,
              activity_regularizer: null,
              kernel_constraint: null,
              bias_constraint: null
            }
          },
          {
            class_name: 'MaxPooling2D',
            config: {
              name: 'max_pooling2d_1',
              trainable: true,
              dtype: 'float32',
              pool_size: [2, 2],
              padding: 'same',
              strides: [2, 2],
              data_format: 'channels_last'
            }
          },
          // Flatten each timestep's feature map into a 1200-vector
          // (80 / 2 / 2 = 20 rows x 60 filters = 1200).
          {
            class_name: 'Reshape',
            config: {
              name: 'reshape',
              trainable: true,
              dtype: 'float32',
              target_shape: [-1, 1200]
            }
          },
          // Bidirectional LSTM, 200 units per direction, outputs concatenated.
          {
            class_name: 'Bidirectional',
            config: {
              name: 'bidi',
              trainable: true,
              dtype: 'float32',
              layer: {
                class_name: 'LSTM',
                config: {
                  name: 'lstm',
                  trainable: true,
                  dtype: 'float32',
                  return_sequences: true,
                  return_state: false,
                  go_backwards: false,
                  stateful: false,
                  unroll: false,
                  time_major: false,
                  units: 200,
                  activation: 'tanh',
                  recurrent_activation: 'sigmoid',
                  use_bias: true,
                  kernel_initializer: {
                    class_name: 'GlorotUniform',
                    config: { seed: null }
                  },
                  recurrent_initializer: {
                    class_name: 'Orthogonal',
                    config: { gain: 1.0, seed: null }
                  },
                  bias_initializer: { class_name: 'Zeros', config: {} },
                  unit_forget_bias: true,
                  kernel_regularizer: null,
                  recurrent_regularizer: null,
                  bias_regularizer: null,
                  activity_regularizer: null,
                  kernel_constraint: null,
                  recurrent_constraint: null,
                  bias_constraint: null,
                  dropout: 0.0,
                  recurrent_dropout: 0.0,
                  implementation: 2
                }
              },
              merge_mode: 'concat'
            }
          },
          // Per-timestep softmax over the 22 character classes.
          {
            class_name: 'Dense',
            config: {
              name: 'dense',
              trainable: true,
              dtype: 'float32',
              units: 22,
              activation: 'softmax',
              use_bias: true,
              kernel_initializer: {
                class_name: 'GlorotUniform',
                config: { seed: null }
              },
              bias_initializer: { class_name: 'Zeros', config: {} },
              kernel_regularizer: null,
              bias_regularizer: null,
              activity_regularizer: null,
              kernel_constraint: null,
              bias_constraint: null
            }
          }
        ]
      }
    },
    // Training metadata preserved by the converter; unused at inference time.
    training_config: {
      loss: null,
      metrics: null,
      weighted_metrics: null,
      loss_weights: null,
      optimizer_config: {
        class_name: 'RMSprop',
        config: {
          name: 'RMSprop',
          learning_rate: 0.001,
          decay: 0.0,
          rho: 0.9,
          momentum: 0.0,
          epsilon: 1e-7,
          centered: false
        }
      }
    }
  },
  // Names/shapes/dtypes of the tensors serialized (in this order) in the
  // base64 weight blob that stands in for 'group1-shard1of1.bin'.
  weightsManifest: [
    {
      paths: ['group1-shard1of1.bin'],
      weights: [
        {
          name: 'bidi/forward_lstm/lstm_cell_4/kernel',
          shape: [1200, 800],
          dtype: 'float32'
        },
        {
          name: 'bidi/forward_lstm/lstm_cell_4/recurrent_kernel',
          shape: [200, 800],
          dtype: 'float32'
        },
        {
          name: 'bidi/forward_lstm/lstm_cell_4/bias',
          shape: [800],
          dtype: 'float32'
        },
        {
          name: 'bidi/backward_lstm/lstm_cell_5/kernel',
          shape: [1200, 800],
          dtype: 'float32'
        },
        {
          name: 'bidi/backward_lstm/lstm_cell_5/recurrent_kernel',
          shape: [200, 800],
          dtype: 'float32'
        },
        {
          name: 'bidi/backward_lstm/lstm_cell_5/bias',
          shape: [800],
          dtype: 'float32'
        },
        { name: 'conv2d/kernel', shape: [3, 3, 1, 40], dtype: 'float32' },
        { name: 'conv2d/bias', shape: [40], dtype: 'float32' },
        { name: 'conv2d_1/kernel', shape: [3, 3, 40, 60], dtype: 'float32' },
        { name: 'conv2d_1/bias', shape: [60], dtype: 'float32' },
        { name: 'dense/kernel', shape: [400, 22], dtype: 'float32' },
        { name: 'dense/bias', shape: [22], dtype: 'float32' }
      ]
    }
  ]
}
// Same character set as src/main.js: blank first, then the 21 captcha glyphs.
// eslint-disable-next-line no-unused-vars
const charset = ['', ...'0248ADGHJKMNPQRSTVWXY']

1
src/weights.js

File diff suppressed because one or more lines are too long
Loading…
Cancel
Save