<?xml version="1.0" encoding="utf-8"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
    <channel>
        <title>ziwe_ek.log</title>
        <link>https://velog.io/</link>
        <description>그냥 돼지</description>
        <lastBuildDate>Fri, 22 Apr 2022 22:53:02 GMT</lastBuildDate>
        <docs>https://validator.w3.org/feed/docs/rss2.html</docs>
        <generator>https://github.com/jpmonette/feed</generator>
        <copyright>Copyright (C) 2019. ziwe_ek.log. All rights reserved.</copyright>
        <atom:link href="https://v2.velog.io/rss/ziwe_ek" rel="self" type="application/rss+xml"/>
        <item>
            <title><![CDATA[SQL_study (1)]]></title>
            <link>https://velog.io/@ziwe_ek/SQLstudy-1</link>
            <guid>https://velog.io/@ziwe_ek/SQLstudy-1</guid>
            <pubDate>Fri, 22 Apr 2022 22:53:02 GMT</pubDate>
            <description><![CDATA[<h2 id="console">Console</h2>
<p>SQL console consists of Schemas, Query, Action Output</p>
<br/>

<p>SQL does not distinguish between upper and lower case.</p>
<br/>


<h2 id="sql-basic-command">SQL Basic command</h2>
<h3 id="4-language">4 Language</h3>
<p>데이터 정의어
Data Definition Language : make, modify, remove table</p>
<p>데이터 조작어
Data Manipulation Language : insert, select, modify, remove data</p>
<p>데이터 제어어
Data Control Language : access, remove data</p>
<p>트랜젝션 제어어
Transaction Control Language : execute, cancel DCL</p>
<br/>

<p>DBA(DataBase Administrator) with DDL, DCL
DA(Data Analyst) with DML, TCL</p>
<br/>

<h2 id="data-definition-language">Data Definition Language</h2>
<h3 id="datatype">DataType</h3>
<p>Each column should be defined as only one data type such as number, string, date, boolean.</p>
<p>number - BIT, INT, BIGINT, FLOAT, DOUBLE
string - CHAR, NCHAR, VARCHAR, NVARCHAR
date - DATETIME, DATE, TIME
*DataBase sets bits to save data efficiently</p>
<h3 id="restrict-condition제약-조건">restrict condition(제약 조건)</h3>
<ol>
<li><p>PK(PRIMARY KEY)
: 중복되어 나타날 수 없는 단일 값
: NOT NULL</p>
</li>
<li><p>NOT NULL
: NULL을 허용하지 않음</p>
</li>
</ol>
<pre><code class="language-sql">/********** DataBase **********/
/* Practice라는 이름으로 데이터베이스 생성 */
CREATE DATABASE Practice;

/* Practice 데이터베이스 사용 */
USE Practice;




/********** Table **********/
/* 회원테이블 생성 */
CREATE TABLE 회원테이블 (
회원번호 INT PRIMARY KEY,  /* Name DataType RestrictCondition */
이름 VARCHAR(20),         /* (바이트 수) */
가입일자 DATE NOT NULL,
수신동의 BIT
);

/* 테이블 명 변경 */
ALTER TABLE 회원테이블 RENAME 회원정보;

/* 회원테이블 조회 */
SELECT * FROM 회원테이블;

/* 테이블 삭제 */ 
DROP TABLE 회원정보;




 /********** Column **********/
 /* 성별 열 추가 */
 ALTER TABLE 회원테이블 ADD 성별 varchar(2);

 /* 성별 열의 타입 변경 */
 ALTER TABLE 회원테이블 MODIFY 성별 VARCHAR(20);

 /* 성별 -&gt; 성 열의 이름 변경 */
 ALTER TABLE 회원테이블 CHANGE 성별 성 VARCHAR(2);
</code></pre>
<h2 id="data-manipulation-language">Data Manipulation Language</h2>
<p>Insert, Find, rewrite, delete Data</p>
<pre><code class="language-sql">USE Practice;

CREATE TABLE 회원테이블 (
회원번호 INT PRIMARY KEY,
이름 VARCHAR(20),
가입일자 DATE NOT NULL,
수신동의 BIT
);



/********** 데이터 삽입 **********/
INSERT INTO 회원테이블 VALUES (1001, &#39;홍길동&#39;, &#39;2020-01-02&#39;, 1);




/********** 제약조건 위반 **********/
/* PRIMARY KEY 위반 */
INSERT INTO 회원테이블 VALUES (1001, &#39;짱구&#39;, &#39;2020-01-06&#39;, 0);

/* NOT NULL 위반 */
INSERT INTO 회원테이블 VALUES (1002, &#39;철수&#39;, NULL, 0);

/* 데이터타입 위반 */
INSERT INTO 회원테이블 VALUES (1003, &#39;훈이&#39;, 1, 0);




/********** 데이터 조회 **********/
/* 모든 열 조회 */
SELECT * 
FROM 회원테이블;

/* 특정 열 조회 */
SELECT 회원번호, 이름
FROM 회원테이블;

/* 특정 열 이름을 변경하여 조회 */
SELECT 회원번호, 
이름 AS 성명
FROM 회원테이블;




/********** 데이터 수정 **********/
/* 모든 데이터 수정 */
UPDATE 회원테이블
SET 수신동의 = 0;

/* 특정 조건 데이터 수정 */
UPDATE 회원테이블
set 수신동의 = 1
WHERE 이름 = &#39;홍길동&#39;;



/********** 데이터 삭제 **********/
/* 특정 데이터 삭제 */
DELETE
FROM 회원테이블
WHERE 이름 = &#39;홍길동&#39;;

/* 모든 데이터 삭제 */
DELETE
FROM 회원테이블;</code></pre>
<h2 id="data-control-language">Data Control Language</h2>
<p>DCL is used when the DB admin gives access rights to users.</p>
<pre><code class="language-sql">/********** 사용자 확인 **********/
/* MYSQL 데이터베이스 사용 */
USE MYSQL;



/********** 사용자 **********/
/* 사용자 아이디 및 비밀번호 생성 */
CREATE USER &#39;TEST&#39;@LOCALHOST IDENTIFIED BY &#39;TEST&#39;;

/* 사용자 확인 */
SELECT *
FROM USER;

/* 사용자 비밀번호 변경 */
SET PASSWORD FOR &#39;TEST&#39;@LOCALHOST = &#39;1234&#39;;

/* 사용자 삭제 */
DROP USER &#39;TEST&#39;@LOCALHOST;



/********** 권한 부여 및 제거 **********/
/* 권한 : CREATE, ALTER, DROP, INSERT, DELETE, UPDATE, SELECT 등 */

/* 특정 권한 부여 */
GRANT SELECT, DELETE ON Practice.회원테이블 TO &#39;TEST&#39;@LOCALHOST;

/* 특정 권한 제거 */
REVOKE DELETE ON Practice.회원테이블 FROM &#39;TEST&#39;@LOCALHOST;

/* 모든 권한 부여 */
GRANT ALL ON Practice.회원테이블 TO &#39;TEST&#39;@LOCALHOST;

/* 모든 권한 제거 */
REVOKE ALL ON Practice.회원테이블 FROM &#39;TEST&#39;@LOCALHOST;</code></pre>
<h2 id="transaction-control-language">Transaction Control Language</h2>
<p>Transaction is undividable minimum unit.
When error, roll back
When no error, commit</p>
<pre><code class="language-sql">USE Practice;


DROP TABLE 회원테이블;

CREATE TABLE 회원테이블 (
회원번호 INT PRIMARY KEY,
이름 VARCHAR(20),
가입일자 DATE NOT NULL,
수신동의 BIT
);

SELECT *
FROM 회원테이블;



/********** BEGIN + ROLLBACK **********/
/* 트랜젝션 시작 */
BEGIN;

/* 데이터 삽입 */
INSERT INTO 회원테이블 VALUES (1001, &#39;홍길동&#39;, &#39;2020-01-02&#39;, 1);

/* 취소 */
ROLLBACK;




/********** BEGIN + COMMIT **********/
/* 트랜젝션 시작 */
BEGIN;

/* 데이터 삽입 */
INSERT INTO 회원테이블 VALUES (1005, &#39;장보고&#39;, &#39;2020-01-02&#39;, 1);

/* 실행 */
COMMIT;




/********** 임시저장 SAVEPOINT **********/
/* 트랜젝션 시작 */
BEGIN;

/* 데이터 삽입 */
INSERT INTO 회원테이블 VALUES (1005, &#39;임시저장&#39;, &#39;2020-01-02&#39;, 1);

/* SAVEPOINT 지정 */
SAVEPOINT S1;

/* 1005 회원 이름 수정 */
UPDATE 회원테이블
SET 이름 = &#39;임시저장 후&#39;;

/* SAVEPOINT 지정 */
SAVEPOINT S2;

/* 1005 회원 데이터 삭제 */
DELETE
FROM 회원테이블;

/* SAVEPOINT 지정 */
SAVEPOINT S3;

/* SAVEPOINT로 ROLLBACK */
ROLLBACK TO S2;

/* 실행 */
COMMIT;
</code></pre>
]]></description>
        </item>
        <item>
            <title><![CDATA[React-Native note]]></title>
            <link>https://velog.io/@ziwe_ek/React-Native-note</link>
            <guid>https://velog.io/@ziwe_ek/React-Native-note</guid>
            <pubDate>Fri, 22 Apr 2022 07:59:05 GMT</pubDate>
            <description><![CDATA[<pre><code class="language-jsx">&lt;Text&gt;
    A is {getFullName(&quot;Rum&quot;, &quot;Tum&quot;, &quot;Tugger&quot;)}.
&lt;/Text&gt;

props = {}
() =&gt; {}</code></pre>
<pre><code class="language-jsx">!isHungry
isHungry ? &quot;true&quot; : &quot;false&quot;</code></pre>
]]></description>
        </item>
        <item>
            <title><![CDATA[ES6 : Rest Operator]]></title>
            <link>https://velog.io/@ziwe_ek/ES6-Rest-Operator</link>
            <guid>https://velog.io/@ziwe_ek/ES6-Rest-Operator</guid>
            <pubDate>Thu, 07 Apr 2022 07:30:58 GMT</pubDate>
            <description><![CDATA[<h2 id="rest-operator">Rest Operator</h2>
<p>Rest Operator <code>...</code> converts the rest parameters to JavaScript array.</p>
<br/>

<ol>
<li>Without rest operator<pre><code class="language-javascript">function printNums(num1, num2) {
console.log(num1, num2);
}
</code></pre>
</li>
</ol>
<p>printNums(1, 2, 3, 4, 5);</p>
<pre><code></code></pre><p>1 2</p>
<pre><code>
&lt;br/&gt;

2. `arguments` is the object of all the parameters.

```javascript
function printNums(num1, num2) {
  console.log(arguments);
}

printNums(1, 2, 3, 4, 5);</code></pre><pre><code>[Arguments] { &#39;0&#39;: 1, &#39;1&#39;: 2, &#39;2&#39;: 3, &#39;3&#39;: 4, &#39;4&#39;: 5 }</code></pre><br/>

<ol start="3">
<li><code>...</code> combines the rest parameters into an array.</li>
</ol>
<pre><code class="language-javascript">function printNums(num1, ...num2) {
  console.log(num1, num2);
}

printNums(1, 2, 3, 4, 5);</code></pre>
<pre><code>1 [ 2, 3, 4, 5 ]</code></pre>]]></description>
        </item>
        <item>
            <title><![CDATA[ES6.3 : for ... in / of... iterations]]></title>
            <link>https://velog.io/@ziwe_ek/ES6.3-for-...-in-of...-iterations</link>
            <guid>https://velog.io/@ziwe_ek/ES6.3-for-...-in-of...-iterations</guid>
            <pubDate>Thu, 07 Apr 2022 07:11:25 GMT</pubDate>
            <description><![CDATA[<p><code>for</code> iterations are divided into <code>for ... in ...</code> and <code>for ... of ...</code>. </p>
<ul>
<li><code>for ... in ...</code> can access the key.</li>
</ul>
<ul>
<li><code>for ... of ...</code> can access the value.</li>
</ul>
<br/>

<h2 id="for--in-"><code>for ... in ...</code></h2>
<pre><code class="language-javascript">let arr = [10,20,30,40]

for (let val in arr) {
  console.log(val)
}</code></pre>
<pre><code>0
1
2
3</code></pre><br/>

<pre><code class="language-javascript">let obj = {
  a: 1,
  b: 2,
  c: 3,
};

for (let val in obj) {
  console.log(val);
}
</code></pre>
<pre><code>a
b
c</code></pre><br/>

<p>With <code>for ... in ...</code>, you can get value using indexing.</p>
<pre><code class="language-javascript">let array = [10, 20, 30, 40];

for (let val in array) {
  console.log(array[val]);
}</code></pre>
<pre><code>10
20
30
40</code></pre><br/>


<h2 id="for--of-"><code>for ... of ...</code></h2>
<pre><code class="language-javascript">let array = [10, 20, 30, 40];

for (let val of array) {
  console.log(val);
}</code></pre>
<pre><code>10
20
30
40</code></pre><br/>

<pre><code class="language-javascript">let obj = {
  a: 1,
  b: 2,
  c: 3,
};

for (let val of obj) {
  console.log(val);
}</code></pre>
<pre><code>TypeError: obj is not iterable
    at Object.&lt;anonymous&gt; (/Users/basecamp/repo/ES6/es6.js:14:17)
    at Module._compile (node:internal/modules/cjs/loader:1103:14)
    at Object.Module._extensions..js (node:internal/modules/cjs/loader:1157:10)
    at Module.load (node:internal/modules/cjs/loader:981:32)
    at Function.Module._load (node:internal/modules/cjs/loader:822:12)
    at Function.executeUserEntryPoint [as runMain] (node:internal/modules/run_main:77:12)
    at node:internal/main/run_main_module:17:47</code></pre>]]></description>
        </item>
        <item>
            <title><![CDATA[ES6.2 String literal]]></title>
            <link>https://velog.io/@ziwe_ek/ES6.2-String-literal</link>
            <guid>https://velog.io/@ziwe_ek/ES6.2-String-literal</guid>
            <pubDate>Thu, 07 Apr 2022 06:56:08 GMT</pubDate>
            <description><![CDATA[<h2 id="string-literal">String literal</h2>
<p>String literal is an effective way to make a string using defined variables.</p>
<pre><code class="language-javascript">const var1 = &#39;hello&#39;
const var2 = &#39;world&#39;

const StrLit = `${var1}, ${var2}!!`
console.log(StrLit)</code></pre>
<pre><code>hello, world!!</code></pre>]]></description>
        </item>
        <item>
            <title><![CDATA[ES6. 1]]></title>
            <link>https://velog.io/@ziwe_ek/ES6.-1</link>
            <guid>https://velog.io/@ziwe_ek/ES6.-1</guid>
            <pubDate>Thu, 07 Apr 2022 06:50:23 GMT</pubDate>
            <description><![CDATA[<h3 id="1-variable">1. Variable</h3>
<p>JavaScript supports <code>var</code>, <code>let</code>, <code>const</code> to define variables.</p>
<p>However, you had better not to use <code>var</code>.</p>
<p>The reason is that</p>
<ul>
<li>var is not restricted by block scope.</li>
<li>var does not arise error even defined twice</li>
</ul>
<p>So, you should use <code>let</code> instead of <code>var</code> for rewritable variables. And you should use <code>const</code> for constant variables.</p>
<pre><code class="language-javascript">var hello = &#39;hello&#39;

if (true) {
  var hello = &#39;world&#39;
  console.log(hello)
}

console.log(hello)</code></pre>
<pre><code></code></pre>]]></description>
        </item>
        <item>
            <title><![CDATA[Numpy(2) : Make Array(2)]]></title>
            <link>https://velog.io/@ziwe_ek/Numpy2-Make-Array2</link>
            <guid>https://velog.io/@ziwe_ek/Numpy2-Make-Array2</guid>
            <pubDate>Thu, 07 Apr 2022 02:16:45 GMT</pubDate>
            <description><![CDATA[<h2 id="one-dimensional-array--scala">One dimensional array : Scalar</h2>
<pre><code class="language-python">import numpy as np

a = np.array([1,2,3], dtype=int)
b = np.array([1.1,2.2,3.3], dtype=float)
c = np.array([1,1,0], dtype=bool)

print(a)
print(a.dtype)

print(b)
print(b.dtype)

print(c)
print(c.dtype)</code></pre>
<pre><code>[1 2 3]
int64
[1.1 2.2 3.3]
float64
[ True  True False]
bool</code></pre><p>You can create an array using <code>np.array({list}, dtype={int/float/bool})</code>. And the data type can be returned by the <code>.dtype</code> property. If you omit the <code>dtype={}</code> parameter, it automatically recognizes the type of the list.</p>
<br/>

<h2 id="two-dimensional-array--matrix">Two dimensional array : Matrix</h2>
<pre><code class="language-python">import numpy as np

a = np.array([[1,2,3],[4,5,6],[7,8,9]])

print(a)</code></pre>
<pre><code>[[1 2 3]
 [4 5 6]
 [7 8 9]]</code></pre><p>The list for the multi dimensional array is composed of overlapped lists such as <code>[[1,2,3],[4,5,6],[7,8,9]]</code>. From that the </p>
<br/>

<h2 id="multi-dimensional-array">Multi dimensional array</h2>
<pre><code class="language-python">import numpy as np

a = np.array([[[1,2],[3,4]],[[5,6],[7,8]],[[9,10],[11,12]]])

print(a)</code></pre>
<pre><code>[[[ 1  2]
  [ 3  4]]

 [[ 5  6]
  [ 7  8]]

 [[ 9 10]
  [11 12]]]</code></pre><p>The list for the multi dimensional array is composed of multi overlapped lists such as <code>[[[1,2],[3,4]],[[5,6],[7,8]],[[9,10],[11,12]]]</code>. It is printed separated by matrix.</p>
<br/>

<h2 id="attribute-of-array">Attribute of array</h2>
<pre><code class="language-python">import numpy as np

a = np.array([[1,2,3],[4,5,6],[7,8,9]])
b = np.array([[[1,2],[3,4]],[[5,6],[7,8]],[[9,10],[11,12]]])

print(a.ndim)
print(a.shape)
print(a.dtype)
print()

print(b.ndim)
print(b.shape)
print(b.dtype)</code></pre>
<pre><code>2
(3, 3)
int64

3
(3, 2, 2)
int64</code></pre><p><code>.ndim</code> returns the number of dimensions of the array.
<code>.shape</code> returns the shape of array.
<code>.dtype</code> returns the data type of array.</p>
<br/>
]]></description>
        </item>
        <item>
            <title><![CDATA[Numpy(1) : Make Array]]></title>
            <link>https://velog.io/@ziwe_ek/Numpy1-Make-Array</link>
            <guid>https://velog.io/@ziwe_ek/Numpy1-Make-Array</guid>
            <pubDate>Thu, 07 Apr 2022 01:57:32 GMT</pubDate>
            <description><![CDATA[<p>Numpy is a module for operating Vector and Matrix.</p>
<br/>

<h2 id="install-the-module">Install the module</h2>
<pre><code class="language-python">conda install numpy</code></pre>
<pre><code># If success, there would be no response.</code></pre><br/>

<h2 id="import-the-module">Import the module</h2>
<pre><code class="language-python">import numpy as np</code></pre>
<pre><code># If success, there would be no response.</code></pre><br/>

<h2 id="make-array-from-list">Make array from list</h2>
<pre><code class="language-python">import numpy as np

a_list = [1,2,3,4,5]
a_np = np.array(a_list)

print(a_list)
print(a_np)</code></pre>
<pre><code>[1, 2, 3, 4, 5]
[1 2 3 4 5]</code></pre><p>There is a difference between list and array. The element of list is divided by comma. In contrast, the element of array is divided by space.</p>
<br/>

<h2 id="copy-array">Copy array</h2>
<pre><code class="language-python">import numpy as np

a_np = np.array([1,2,3,4,5])
b_np = a_np
c_np = a_np.copy()

b_np[0] = 99

print(a_np)
print(b_np)
print(c_np)</code></pre>
<pre><code>[99  2  3  4  5]
[99  2  3  4  5]
[1 2 3 4 5]</code></pre><p>The <code>copy()</code> method duplicates the original array. This copy is not a reference but a separate object.</p>
<br/>

<h2 id="indexing-slicing-array">Indexing, Slicing array</h2>
<pre><code class="language-python">import numpy as np

a_np = np.array([1,2,3,4,5])

print(a_np[0])
print(a_np[0:2])</code></pre>
<pre><code>1
[1 2]</code></pre><p>Array can be indexed and sliced as similar to list.</p>
<br/>

<h2 id="broadcast-array">Broadcast array</h2>
<pre><code class="language-python">import numpy as np

a_np = np.array([1,2,3,4,5])
b_np = np.array([1,2,3,4,5])
c_np = np.array([1,2,3])

print(a_np + 2)
print(a_np - 2)
print(a_np * 2)
print(a_np / 2)</code></pre>
<pre><code>[3 4 5 6 7]
[-1  0  1  2  3]
[ 2  4  6  8 10]
[0.5 1.  1.5 2.  2.5]</code></pre><p>By numpy, we can operate calculations between a vector and a scalar. This is one of the differences from list.</p>
<br/>

<h2 id="operate-array">Operate array</h2>
<pre><code class="language-python">import numpy as np

a_np = np.array([1,2,3,4,5])
b_np = np.array([1,2,3,4,5])
c_np = np.array([1,2,3])

print(a_np + b_np)
print(a_np - b_np)
print(a_np * b_np)
print(a_np / b_np)</code></pre>
<pre><code>[ 2  4  6  8 10]
[0 0 0 0 0]
[ 1  4  9 16 25]
[1. 1. 1. 1. 1.]</code></pre><p>By numpy, we can operate calculations between vectors. In that, the lengths of the operands should match each other.</p>
]]></description>
        </item>
        <item>
            <title><![CDATA[Object]]></title>
            <link>https://velog.io/@ziwe_ek/Object</link>
            <guid>https://velog.io/@ziwe_ek/Object</guid>
            <pubDate>Mon, 28 Mar 2022 09:42:58 GMT</pubDate>
        </item>
        <item>
            <title><![CDATA[Review : Pythonic?]]></title>
            <link>https://velog.io/@ziwe_ek/Pythonic</link>
            <guid>https://velog.io/@ziwe_ek/Pythonic</guid>
            <pubDate>Mon, 28 Mar 2022 09:41:25 GMT</pubDate>
            <description><![CDATA[<h2 id="pythonic">Pythonic</h2>
<p>First of all, can you answer the below question?</p>
<blockquote>
<p>What is pythonic? </p>
</blockquote>
<p>Maybe, Pythonic means python-friendly.</p>
<p>And then, what about this question?</p>
<blockquote>
<p>How can we code scripts pythonically?</p>
</blockquote>
<p>In fact, there is a guideline on which a lot of people agreed. It is called PEP8. But it is not like we should follow it literally, such as tax laws.</p>
<p>I think Python is one of the most intuitive languages.</p>
<p>As known, Algorithm is a kind of procedure which contains special intensions. From this, we can realize that good algorithm comes from the systematic sequence of tasks.</p>
<p>Human recognize the procedure</p>
]]></description>
        </item>
        <item>
            <title><![CDATA[Basic : Softmax Layer]]></title>
            <link>https://velog.io/@ziwe_ek/Basic-Softmax-Layer</link>
            <guid>https://velog.io/@ziwe_ek/Basic-Softmax-Layer</guid>
            <pubDate>Sat, 26 Mar 2022 19:50:40 GMT</pubDate>
            <description><![CDATA[<h2 id="softmax-layer">Softmax Layer</h2>
<p>Softmax is used for multinomial classification.</p>
<p>The number of classifiers of the classification comes from the number of the neurons in the last layer.</p>
<p>And this is the number of the outputs as probability.</p>
<p>In Tensorflow, Softmax is a kind of Activation-Function.</p>
<p>Each output for the neuron of the last layer can be recognized as a logit.</p>
<p>$$
(\overrightarrow{p})^{T}
:\
S_i((\overrightarrow{l})^{T}) = p_i = {{e^{l_i}} \over {\sum_{k=1}^{K} [e^{l_k}]}}
:\
(\overrightarrow{l})^{T}
$$</p>
<p>In softmax, the logit vector is converted to the probability vector. And the sum of the element of the probability vector equals 1.</p>
<p>For binary classification, there are two ways. One is to use Sigmoid with the last one neuron, another is to use Softmax with the last two neurons.</p>
<p>Actually, there is only Affine-Function and no Activation-Function at output layer. Instead of that, Softmax converts the vector z to the vector p</p>
<p>$$
(\overrightarrow{p})^{T}
:\
(\overrightarrow{z})^{[O]}
:\
(\overrightarrow{a})^{[O-1]}
$$</p>
<p>In conclusion, Softmax is a converter from logits to probabilities, and Sigmoid is a converter from a logit to a probability.</p>
<h2 id="softmax-layer-1">Softmax Layer</h2>
<p>$$
\hat{Y}^{T}
:\
Softmax
:\
L^{[O]}
:\
L^{[I]}
:\
X^T
$$</p>
]]></description>
        </item>
        <item>
            <title><![CDATA[Basic : Sigmoid and Softmax]]></title>
            <link>https://velog.io/@ziwe_ek/Basic-Sigmoid-and-Softmax</link>
            <guid>https://velog.io/@ziwe_ek/Basic-Sigmoid-and-Softmax</guid>
            <pubDate>Sat, 26 Mar 2022 17:28:57 GMT</pubDate>
            <description><![CDATA[<h2 id="why-sigmoid-softmax">Why Sigmoid? Softmax?</h2>
<p>In Deep-Learning process, learn depends on loss. loss is the scale of the difference between the dataset which is prepared and the output from the model.</p>
<p>Sigmoid and softmax are mainly used for image classification.</p>
<p>There are reasons why starters should focus on the image classification. One is that it is easier than the other DL processes. Another is that there are lots of references because image classification is one of the first fields.</p>
<br/>


<h2 id="odds">Odds</h2>
<p>Odds are the ratio of the probability of one event to that of an alternative event. So, if odds are bigger than 1, the probability of one event is more than that of an alternative event. In contrast, if odds are smaller than 1, the probability of one event is less than that of an alternative event.</p>
<p>$$
o = {{p} \over {1-p}}
$$</p>
<p>This is just another way to express the probability of an event happening or not. And, the closer the probability goes to 1, the odds diverge infinitely.</p>
<br/>


<h2 id="logit">Logit</h2>
<p>However, the graph of odds is not symmetric. So, it is required to put log on odds. This is called logit.</p>
<p>$$
l = log({{p} \over {1-p}}) \
\quad\quad\quad\quad
= log(p) - log(1-p)
$$</p>
<p>Logit is symmetric with respect to 0.5 at which odds are 1 and logit is 0. And logit diverges not only positively but also negatively.</p>
<br/>


<h2 id="logit-and-sigmoid">Logit and Sigmoid</h2>
<p>The inverse function of logit is the Sigmoid equation which returns probability from logit.
$$
p = {{1} \over {1+{e^{-l}}}} \quad (0 \le p \le 1)
$$
Sigmoid is symmetric with respect to 0 and there are horizontal asymptotes on 0 and on 1.</p>
<p>As logit diverges, the value of Affine-Transformation also diverges. Because of this, we can deal with the value of Affine-Transformation as a kind of logit. In other words, if the value of Affine-Transformation passes through the sigmoid equation, it would be converted to the probability. Therefore, in this process, Deep Learning learns the probability.</p>
<p>Sigmoid is used for binary classification.
Softmax is used for multinomial classification.</p>
<p>Logistic Regression is based on this.</p>
<p>$$
\overrightarrow{p}
;\
;\
\overrightarrow{z}^{[1]}
;\
;\
X^T
$$</p>
<p>Affine-Function -&gt; logit
Activation-Function -&gt; probability</p>
]]></description>
        </item>
        <item>
            <title><![CDATA[[Python] Tips : Special Method]]></title>
            <link>https://velog.io/@ziwe_ek/Python-Tips-Special-Method</link>
            <guid>https://velog.io/@ziwe_ek/Python-Tips-Special-Method</guid>
            <pubDate>Sat, 26 Mar 2022 12:59:46 GMT</pubDate>
            <description><![CDATA[<h2 id="special-method">Special Method?</h2>
<p>Special Method, which is also called Magic Method, is a kind of method that can be set in user-defined class objects. It is usually surrounded by two Under-bars at head and tail of the keyword like <code>__init__</code>, <code>__str__</code>, etc.</p>
<h2 id="why-do-we-need-this">Why do we need this?</h2>
<p>With Special Method, your script can be more pythonic and more efficient.</p>
<p>You can</p>
<ul>
<li>Use the instance of the class as built-in types, such as data-types, functions, etc.</li>
<li></li>
<li></li>
</ul>
<h2 id="how-to-use-special-method">How to use Special Method</h2>
<p>Take a look at the below code. At the bottom, I commanded to <code>print()</code> the parameter with the identifier <code>japan</code>. At this, the <code>japan</code> is the instance of the class which I defined. So, the code prints the information of the type about the <code>japan</code>.</p>
<pre><code class="language-python">class country:
  def __init__(self, name, population):
    self.name = name
    self.population = population

japan = country(&#39;Japan&#39;, &#39;125m&#39;)

print(japan)</code></pre>
<pre><code class="language-python"># &lt;__main__.country object at 0x7f0d90322290&gt;</code></pre>
<p>As Known, this is the expected output. However, there will be something different with Special Method.</p>
<pre><code class="language-python">class country:
  def __init__(self, name, population):
    self.name = name
    self.population = population

  # Add Specical Method inside the class
  def __str__(self):
    return f&#39;name : {self.name}, population : {self.population}&#39;

japan = country(&#39;Japan&#39;, &#39;125m&#39;)

print(japan)</code></pre>
<pre><code># name : Japan, population : 125m</code></pre><p>At the bottom, I also commanded to <code>print()</code> the parameter with the identifier <code>japan</code>. And then, the code prints the string from <code>return</code> of instance method &#39;<code>__str__()</code>&#39; inside the class <code>japan</code>.</p>
<p>Therefore, you can get string from class, just as parameter.</p>
<br/>

<pre><code class="language-python">class country:
  def __init__(self, name, population):
    self.name = name
    self.population = population

japan = country(&#39;Japan&#39;, &#39;125m&#39;)

japan()</code></pre>
<pre><code>---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
&lt;ipython-input-3-88c499f4f664&gt; in &lt;module&gt;()
      6 japan = country(&#39;Japan&#39;, &#39;125m&#39;)
      7 
----&gt; 8 japan()

TypeError: &#39;country&#39; object is not callable</code></pre><br/>


<pre><code class="language-python">class country:
  def __init__(self, name, population):
    self.name = name
    self.population = population

  def __call__(self):
    print(f&#39;name : {self.name}, population : {self.population}&#39;)

japan = country(&#39;Japan&#39;, &#39;125m&#39;)

japan()</code></pre>
<pre><code>name : Japan, population : 125m</code></pre>]]></description>
        </item>
        <item>
            <title><![CDATA[Basic : Mini-Batch in Dense Layer]]></title>
            <link>https://velog.io/@ziwe_ek/Basic-Mini-Batch-in-Dense-Layer</link>
            <guid>https://velog.io/@ziwe_ek/Basic-Mini-Batch-in-Dense-Layer</guid>
            <pubDate>Fri, 25 Mar 2022 14:10:52 GMT</pubDate>
            <description><![CDATA[<h2 id="mini-batch-in-dense-layer">Mini-Batch in Dense Layer</h2>
<p>shape?</p>
<p>calculation?</p>
<p>output?</p>
<p>$l_{\overrightarrow{v}}$ : The length of v Vector</p>
<p>$n_{\overrightarrow{v}}$ : The number of v Vector</p>
<p>$i_{(l_{\overrightarrow{v}})}$ : The index of (the length of v Vector)</p>
<p>$m^{[from]}_{coordinate}$ : The element of M Marix</p>
<p>The length of </p>
<p>$$
{A}^{T} = 
(, \ddots {{a}_{(i_{(N_{\overrightarrow{x}})},  :i_{(l_{\overrightarrow{a}})})}^{[i_{L}]}}\ddots ,)
\in : 
{\R}^{N_{\overrightarrow{x}} \times l_{\overrightarrow{\nu}^{[i_{(L)}]}}}</p>
<p>:\
:\</p>
<p>\uparrow\
Dense,Layer \
|\</p>
<p>:\
:\</p>
<p>{X}^{T} = (, \ddots  {x}^{[i_{(N_{\overrightarrow{x}})}]}<em>{i</em>{(l_{\overrightarrow{x}})}}\ddots ,) \in : {\R}^{N_{\overrightarrow{x}} \times l_{\overrightarrow{x}}}
$$</p>
<p>$$
{N_{\overrightarrow{x}} \times 
(
l_{\overrightarrow{x}}} \rightarrow l_{\overrightarrow{\nu}^{[i_{(L)}]}}
)
$$</p>
<p>Passing through the layers, Dense Layer converts the columns of the Materix, from the number of x Vector to the number of $\nu$ Vector.</p>
<p>$$
z^{[1]}_{i,: j} = (\overrightarrow{x}^{(i)})^T \cdot\overrightarrow{w}^{[1]}_{j} + b^{[1]}_{j}
$$</p>
<p>$$
a^{[1]}_{i,: j} = g(z^{[1]}_{i,: j})
$$</p>
<p>You have to observe the shape. </p>
<p>The Activation is one-to-one correspondence. The shape do not change.</p>
<p>So. in a column, as neuron-wise, the same neuron
And, in a row, as Batch-wise, the same layer</p>
<p>Batch is the set of data, X Matrix</p>
<h2 id="cascaded-dense-layer">Cascaded Dense Layer</h2>
<p>$$
X^T = \R^{n_{\overrightarrow{x}} \times l_{\overrightarrow{x}}}
$$</p>
<p>$$
W = \R^{l_{\overrightarrow{x}} \times n_{\overrightarrow{\nu}}}
:\
B = \R^{n_{\overrightarrow{x}} \times n_{\overrightarrow{\nu}}}
$$</p>
<p>$$
Z^T = \R^{n_{\overrightarrow{x}} \times n_{\overrightarrow{\nu}}}
$$</p>
<p>$$
A^T = \R^{n_{\overrightarrow{x}} \times n_{\overrightarrow{\nu}}}
$$</p>
<p>I think that it is much more proper to count neurons using not length but number because neurons are independent of each other like the relations among the x vectors of the X matrix.</p>
]]></description>
        </item>
        <item>
            <title><![CDATA[Basic : Generalized Dense Layers]]></title>
            <link>https://velog.io/@ziwe_ek/Basic-Generalized-Dense-Layers</link>
            <guid>https://velog.io/@ziwe_ek/Basic-Generalized-Dense-Layers</guid>
            <pubDate>Thu, 24 Mar 2022 18:52:20 GMT</pubDate>
            <description><![CDATA[<h2 id="dimensions-of-dense-layer">Dimensions of Dense Layer</h2>
<p>Cascaded structure.</p>
<p>Mxnet is a framework which controls multiple GPUs at once. Because there is an Include-Top classifier parameter in Mxnet, the drawings of the Dense Layers are better drawn from bottom to top.</p>
<br/>

<blockquote>
<center><strong>The D</strong></center>
<br/>

<p>$$
(\overrightarrow{a}^{[l_L]})^{T} \in \R^{1 \times {l_{\nu^{[l_L]}}}}\
:\
\vdots\
:\
(\overrightarrow{a}^{[2]})^{T} \in \R^{1 \times {l_{\nu^{[2]}}}}\
:\
\uparrow \
L_2 = (\dots : \nu^{[2]}_{i} : \dots) \
|\
: \
(\overrightarrow{a}^{[1]})^{T} \in \R^{1 \times {l_{\nu^{[1]}}}}\
: \
\uparrow \
L_1 = (\dots : \nu^{[1]}_{i} : \dots) \
| \
: \
(\overrightarrow{x})^{T} \in \R^{1 \times {l_x}}
$$</p>
</blockquote>
<br/>

<p>The number of parameter is </p>
<p>$$(l_x \times l_{\nu}) + (1 \times l_{\nu})$$</p>
<p>$$(l_x+1) \times l_{\nu}$$</p>
<p>In Dense Layer, as the network keeps passing through the layers, the number of parameters considerably increases.</p>
<h2 id="forward-propagation-of-the-second-dense-layer">Forward Propagation of The Second Dense Layer</h2>
<h2 id="the-second-dense-layer">The Second Dense Layer</h2>
<h2 id="generalized-dense-layer">Generalized Dense Layer</h2>
<blockquote>
<center><strong>[ Generalized Dense Layer ]</strong></center>
<br/>

<p>$$
\vdots\
(\overrightarrow{a}^{[i]})^{T} \in \R^{1 \times {l_{\nu^{[i]}}}}\
:\
\uparrow \
L_i = (\dots : \nu^{[i]}_{i} : \dots) \
|\
: \
(\overrightarrow{a}^{[i-1]})^{T} \in \R^{1 \times {l_{\nu^{[i-1]}}}}\
\vdots\
$$</p>
</blockquote>
]]></description>
        </item>
        <item>
            <title><![CDATA[Basic : The First Dense Layer]]></title>
            <link>https://velog.io/@ziwe_ek/Basic-The-First-Dense-Layer</link>
            <guid>https://velog.io/@ziwe_ek/Basic-The-First-Dense-Layer</guid>
            <pubDate>Thu, 24 Mar 2022 16:54:17 GMT</pubDate>
            <description><![CDATA[<h2 id="parameters-of-dense-layer">Parameters of Dense Layer</h2>
<p>$$
(\overrightarrow{x})^T = (x_1 \dots x_i)
$$</p>
<br/>
<center>goes through</center>

<p>$$
\dots : {L}_i : \dots
$$
<br/></p>
<center>, which is composed of</center>

<p>$$
\dots : {\nu}_{i}^{:[i_L]} : \dots
$$</p>
<br/>
<center>, and which is composed of</center>

<p>$$
\dots : \overrightarrow{w}_{i,: i_{\nu}}^{[i_L]} : \dots \in \R^{l_w \times 1}
$$
$$
: \dots : {b}_{: : i_{\nu}}^{[i_L]} : \dots \in \R^{1 \times 1}
$$</p>
<br/>


<p>There are two reasons why the weights are arrayed in a column vector : One is that Algebra sets column vectors as the default, the other is that the dense layer in particular reads the vector of weights in column form.</p>
<h2 id="weighted-matrix-and-bias-vector">Weighted Matrix and Bias Vector</h2>
<p>combine the column vectors of the weights as matrix.</p>
<p>The shape of the weight matrix is that the length of Input times the length of Output.</p>
<p>$$
{W}^{[i_L]} \in \R^{l_w \times l_{\nu}}
$$
$$
: \overrightarrow{b}^{[i_L]} \in \R^{1 \times l_{\nu}}
$$</p>
<h2 id="forward-propagation-of-dense-layer">Forward Propagation of Dense Layer</h2>
<p>$$
{a}_i^{[i_L]} = \nu_i^{:[i_L]}((\overrightarrow{x})^T; : \overrightarrow{w}_{i,: i_{\nu}}^{[i_L]}, : b_{: : i_{\nu}}^{[i_L]})
$$</p>
<p>$$
(\overrightarrow{a}^{[i_L]})^T = (\overrightarrow{x})^T \cdot {W}^{[i_L]} + \overrightarrow{b}^{[i_L]}
$$</p>
<br/>
<br/>
<br/>

<p>$$
(\overrightarrow{x})^T \in {\R}^{1 \times {l_x}}
$$</p>
<br/>
<center>becomes</center>

<p>$$
(\overrightarrow{a})^T \in {\R}^{1 \times {l_{\nu}}}
$$</p>
]]></description>
        </item>
        <item>
            <title><![CDATA[Tips : To print multi-dimensional data in better way]]></title>
            <link>https://velog.io/@ziwe_ek/Tips-To-print-multi-dimensional-data-in-better-way</link>
            <guid>https://velog.io/@ziwe_ek/Tips-To-print-multi-dimensional-data-in-better-way</guid>
            <pubDate>Thu, 24 Mar 2022 10:10:06 GMT</pubDate>
            <description><![CDATA[<pre><code class="language-python">aaa = [[1,2,3],[4,5,6],[7,8,9]],[[10,11,12],[13,14,15],[16,17,18]]

for i, row in enumerate(aaa):
    print(i)
    print(*row, sep=&#39;\n&#39;)
    print()</code></pre>
<pre><code>0
[1, 2, 3]
[4, 5, 6]
[7, 8, 9]

1
[10, 11, 12]
[13, 14, 15]
[16, 17, 18]</code></pre><pre><code class="language-python">aaa = [[1,2,3],[4,5,6],[7,8,9]],[[10,11,12],[13,14,15],[16,17,18]]

for i, row in enumerate(aaa):
    print(i)
    print(*row, sep=&#39;\n&#39;)
    print()</code></pre>
<pre><code>0
[1, 2, 3]
[4, 5, 6]
[7, 8, 9]

1
[10, 11, 12]
[13, 14, 15]
[16, 17, 18]</code></pre>]]></description>
        </item>
        <item>
            <title><![CDATA[Basic : Dense Layer]]></title>
            <link>https://velog.io/@ziwe_ek/Dense-Layers</link>
            <guid>https://velog.io/@ziwe_ek/Dense-Layers</guid>
            <pubDate>Thu, 24 Mar 2022 07:48:14 GMT</pubDate>
            <description><![CDATA[<h2 id="neuron-vector-and-layer">Neuron Vector and Layer</h2>
<p>A layer is a vector of neurons, which are kinds of parametric functions and generally have their own weights and their own biases for featuring (filtering).</p>
<p>A filterbank is a bundle of filters, such as a music equalizer. Because each filter of the bank operates independently of the others, the filters can be designed separately.</p>
<p>Deep learning is based on a cascade-structured filterbank.</p>
<p>How a deep learning architecture works is by building a filterbank from bundles of correlated neurons in a cascaded structure.</p>
<h2 id="dense-layer">Dense Layer</h2>
<p>The classification of the layer type depends on how the elements of the input are picked for passing through the layers.</p>
<p>In a Dense layer, all the elements of the input are connected to each neuron of the layer.</p>
<p>A network is composed of edges and nodes.</p>
<blockquote>
<p>Layer : $L^{[i]}$ ($L$ for layer)
Neuron Vector : $\overrightarrow{\nu}^{[i]}$
# of Neurons : $l_i$ ($l$ for length)</p>
</blockquote>
<p>$$
\overrightarrow{a}^{[i-1]}
$$</p>
<br/>

<p>$$
\overrightarrow{\nu}^{[i]}(\overrightarrow{a}^{[i-1]})
$$</p>
<br/>

<p>$$
\overrightarrow{a}^{[i]}
$$</p>
]]></description>
        </item>
        <item>
            <title><![CDATA[Tips : Join]]></title>
            <link>https://velog.io/@ziwe_ek/Tips-Join</link>
            <guid>https://velog.io/@ziwe_ek/Tips-Join</guid>
            <pubDate>Thu, 24 Mar 2022 02:39:47 GMT</pubDate>
            <description><![CDATA[<pre><code class="language-python"># join() cannot combine any other data type except string.
# Because Python reports TypeError when you try to concatenate int and string.
aaa = [1,2,3]
&#39;&#39;.join(aaa)</code></pre>
<pre><code>&#39;123&#39;</code></pre><pre><code class="language-python"># join() only can combine string.
aaa = [&#39;1&#39;,&#39;2&#39;,&#39;3&#39;]
&#39;&#39;.join(aaa)</code></pre>
<pre><code class="language-python">1 + &#39;1&#39;</code></pre>
<pre><code>---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
/var/folders/y1/pfjkj5cd0wz3vhn094nv_yl80000gn/T/ipykernel_18846/2877782753.py in &lt;module&gt;
----&gt; 1 1 + &#39;1&#39;

TypeError: unsupported operand type(s) for +: &#39;int&#39; and &#39;str&#39;</code></pre>]]></description>
        </item>
        <item>
            <title><![CDATA[Artificial Neuron]]></title>
            <link>https://velog.io/@ziwe_ek/Artificial-Neuron</link>
            <guid>https://velog.io/@ziwe_ek/Artificial-Neuron</guid>
            <pubDate>Wed, 23 Mar 2022 23:55:37 GMT</pubDate>
            <description><![CDATA[<h2 id="code1-3-1-activation-layers">Code.1-3-1: Activation Layers</h2>
<pre><code class="language-python"># In Activation layer, only activation

import tensorflow as tf

from tensorflow.math import exp, maximum
from tensorflow.keras.layers import Activation  # Activation is a kind of Layers

x = tf.random.normal(shape=(1, 5)) # input setting

# imp. activation function
sigmoid = Activation(&#39;sigmoid&#39;)
tanh = Activation(&#39;tanh&#39;)
relu = Activation(&#39;relu&#39;)

# forward propagation(TensorFlow)
y_sigmoid_tf = sigmoid(x)
y_tanh_tf = tanh(x)
y_relu_tf = relu(x)

# forward propagation(manual)
y_sigmoid_man = 1 / (1 + exp(-x))
y_tanh_man = (exp(x) - exp(-x)) / (exp(x) + exp(-x))
y_relu_man = maximum(x, 0)


print(f&#39;x: {x.shape}\n{x.numpy()}&#39;)
print()
print(f&#39;Sigmoid(TensorFlow): {y_sigmoid_tf.shape}\n{y_sigmoid_tf.numpy()}&#39;)
print(f&#39;Sigmoid(TensorFlow): {y_sigmoid_man.shape}\n{y_sigmoid_man.numpy()}&#39;)
print()
print(f&#39;Tanh(TensorFlow): {y_tanh_tf.shape}\n{y_tanh_tf.numpy()}&#39;)
print(f&#39;Tanh(TensorFlow): {y_tanh_man.shape}\n{y_tanh_man.numpy()}&#39;)
print()
print(f&#39;ReLU(TensorFlow): {y_relu_tf.shape}\n{y_relu_tf.numpy()}&#39;)
print(f&#39;ReLU(TensorFlow): {y_relu_man.shape}\n{y_relu_man.numpy()}&#39;)</code></pre>
<pre><code>x: (1, 5)
[[ 0.4140593  -1.0886137  -1.9466102   1.297912   -0.11210307]]

Sigmoid(TensorFlow): (1, 5)
[[0.6020608  0.2518794  0.12492344 0.78548336 0.47200355]]
Sigmoid(TensorFlow): (1, 5)
[[0.60206085 0.25187942 0.12492345 0.78548336 0.47200358]]

Tanh(TensorFlow): (1, 5)
[[ 0.39191395 -0.79637164 -0.96005476  0.8611846  -0.1116358 ]]
Tanh(TensorFlow): (1, 5)
[[ 0.39191392 -0.7963717  -0.96005493  0.8611847  -0.1116358 ]]

ReLU(TensorFlow): (1, 5)
[[0.4140593 0.        0.        1.297912  0.       ]]
ReLU(TensorFlow): (1, 5)
[[0.4140593 0.        0.        1.297912  0.       ]]</code></pre><h2 id="code1-3-2-activation-in-dense-layer">Code.1-3-2: Activation in Dense Layer</h2>
<pre><code class="language-python"># In Dense layer, affine + activation

import tensorflow as tf
from tensorflow.math import exp
from tensorflow.keras.layers import Dense   # Dense is a kind of Layers

x = tf.random.normal(shape=(1, 5)) # input setting

# imp. artificial neuron
# make a unit of dense layer in combination of the activation
dense_sigmoid = Dense(units=1, activation=&#39;sigmoid&#39;)
dense_tanh = Dense(units=1, activation=&#39;tanh&#39;)
dense_relu = Dense(units=1, activation=&#39;relu&#39;)

# forward propagation(tensorflow)
y_sigmoid = dense_sigmoid(x)
y_tanh = dense_tanh(x)
y_relu = dense_relu(x)

print(f&#39;x: {x.shape}\n{x.numpy()}&#39;)
print()
print(f&#39;AN with Sigmoid: {y_sigmoid.shape}\n{y_sigmoid.numpy()}&#39;)
print(f&#39;AN with Tanh: {y_tanh.shape}\n{y_tanh.numpy()}&#39;)
print(f&#39;AN with ReLU: {y_relu.shape}\n{y_relu.numpy()}&#39;)

print()
print(&#39;======&#39;)
print()

# forward progataion(manual)
W, B = dense_sigmoid.get_weights()
z = tf.linalg.matmul(x, W) + B
a = 1 / (1 + exp(-z))

print(f&#39;Activation value(TensorFlow): {y_sigmoid.shape}\n{y_sigmoid.numpy()}&#39;)
print(f&#39;Activation value(manual): {a.shape}\n{a.numpy()}&#39;)</code></pre>
<pre><code>x: (1, 5)
[[ 1.1621749 -0.7989249  2.0781152 -0.9444862  0.6221622]]

AN with Sigmoid: (1, 1)
[[0.7378292]]
AN with Tanh: (1, 1)
[[-0.7343226]]
AN with ReLU: (1, 1)
[[0.]]

======

Activation value(TensorFlow): (1, 1)
[[0.7378292]]
Activation value(manual): (1, 1)
[[0.73782927]]</code></pre>]]></description>
        </item>
    </channel>
</rss>